From: Josef Bacik <josef(a)toxicpanda.com>
commit e076ab2a2ca70a0270232067cd49f76cd92efe64 upstream.
Commit 38d715f494f2 ("btrfs: use btrfs_start_delalloc_roots in
shrink_delalloc") cleaned up how we do delalloc shrinking by utilizing
some infrastructure we have in place to flush inodes that we use for
device replace and snapshot. However, this introduced a pretty serious
performance regression. To reproduce, the user untarred the source
tarball of Firefox (360MiB xz compressed / 1.5GiB uncompressed) and saw
it take anywhere from 5 to 20 times as long to untar on 5.10 compared
to 5.9. This was observed on fast devices (SSD and better) and not on
HDD.
The root cause is that we previously used the normal writeback path to
reclaim delalloc space, and for this we would provide it with the number
of pages we wanted to flush. The referenced commit changed this to flush
that many inodes instead, which drastically increased the amount of space
we were flushing in certain cases and severely hurt performance.
We cannot revert this patch unfortunately because of 3d45f221ce62
("btrfs: fix deadlock when cloning inline extent and low on free
metadata space") which requires the ability to skip flushing inodes that
are being cloned in certain scenarios, which means we need to keep using
our flushing infrastructure or risk re-introducing the deadlock.
Instead, to fix this problem, we can go back to providing
btrfs_start_delalloc_roots with a number of pages to flush, and then set
up a writeback_control and utilize sync_inode() to handle the flushing
for us. This gives us the same behavior we had prior to the fix, while
still allowing us to avoid the deadlock that was fixed by Filipe. I
redid the user's original test and got the following results on one of
our test machines (256GiB of RAM, 56 cores, 2TiB Intel NVMe drive):
5.9 0m54.258s
5.10 1m26.212s
5.10+patch 0m38.800s
5.10+patch is significantly faster than plain 5.9 because of my patch
series "Change data reservations to use the ticketing infra" which
contained the patch that introduced the regression, but generally
improved the overall ENOSPC flushing mechanisms.
Additional testing on a consumer-grade SSD (8GiB RAM, 8 CPUs) confirms
the results:
5.10.5 4m00s
5.10.5+patch 1m08s
5.11-rc2 5m14s
5.11-rc2+patch 1m30s
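[ Illustration, not part of the patch: the core of the change below is budgeting the flush in pages via a writeback_control and letting sync_inode() do the work. A minimal sketch of that pattern, with a made-up helper name: ]

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/writeback.h>

/* Sketch only: flush up to @nr_pages of dirty data for one inode.  The
 * helper name is made up; the real logic lives in start_delalloc_inodes().
 */
static int flush_one_inode(struct inode *inode, u64 nr_pages)
{
	struct writeback_control wbc = {
		.nr_to_write	= (nr_pages == U64_MAX) ? LONG_MAX
							: (unsigned long)nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	/* sync_inode() decrements wbc.nr_to_write as pages go out, so the
	 * caller can stop iterating inodes once it drops to <= 0.
	 */
	return sync_inode(inode, &wbc);
}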
Reported-by: René Rebe <rene(a)exactcode.de>
Fixes: 38d715f494f2 ("btrfs: use btrfs_start_delalloc_roots in shrink_delalloc")
CC: stable(a)vger.kernel.org # 5.10
Signed-off-by: Josef Bacik <josef(a)toxicpanda.com>
Tested-by: David Sterba <dsterba(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
[ add my test results ]
Signed-off-by: David Sterba <dsterba(a)suse.com>
---
fs/btrfs/inode.c | 61 +++++++++++++++++++++++++++++++------------
fs/btrfs/space-info.c | 4 ++-
2 files changed, 47 insertions(+), 18 deletions(-)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7e8d8169779d..a6e527387b0a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9389,7 +9389,9 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
-static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot)
+static int start_delalloc_inodes(struct btrfs_root *root,
+ struct writeback_control *wbc,
+ bool snapshot)
{
struct btrfs_inode *binode;
struct inode *inode;
@@ -9397,6 +9399,7 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
struct list_head works;
struct list_head splice;
int ret = 0;
+ bool full_flush = wbc->nr_to_write == LONG_MAX;
INIT_LIST_HEAD(&works);
INIT_LIST_HEAD(&splice);
@@ -9420,18 +9423,24 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
if (snapshot)
set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
&binode->runtime_flags);
- work = btrfs_alloc_delalloc_work(inode);
- if (!work) {
- iput(inode);
- ret = -ENOMEM;
- goto out;
- }
- list_add_tail(&work->list, &works);
- btrfs_queue_work(root->fs_info->flush_workers,
- &work->work);
- if (*nr != U64_MAX) {
- (*nr)--;
- if (*nr == 0)
+ if (full_flush) {
+ work = btrfs_alloc_delalloc_work(inode);
+ if (!work) {
+ iput(inode);
+ ret = -ENOMEM;
+ goto out;
+ }
+ list_add_tail(&work->list, &works);
+ btrfs_queue_work(root->fs_info->flush_workers,
+ &work->work);
+ } else {
+ ret = sync_inode(inode, wbc);
+ if (!ret &&
+ test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags))
+ ret = sync_inode(inode, wbc);
+ btrfs_add_delayed_iput(inode);
+ if (ret || wbc->nr_to_write <= 0)
goto out;
}
cond_resched();
@@ -9457,17 +9466,28 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
{
+ struct writeback_control wbc = {
+ .nr_to_write = LONG_MAX,
+ .sync_mode = WB_SYNC_NONE,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ };
struct btrfs_fs_info *fs_info = root->fs_info;
- u64 nr = U64_MAX;
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EROFS;
- return start_delalloc_inodes(root, &nr, true);
+ return start_delalloc_inodes(root, &wbc, true);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
{
+ struct writeback_control wbc = {
+ .nr_to_write = (nr == U64_MAX) ? LONG_MAX : (unsigned long)nr,
+ .sync_mode = WB_SYNC_NONE,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ };
struct btrfs_root *root;
struct list_head splice;
int ret;
@@ -9481,6 +9501,13 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
while (!list_empty(&splice) && nr) {
+ /*
+ * Reset nr_to_write here so we know that we're doing a full
+ * flush.
+ */
+ if (nr == U64_MAX)
+ wbc.nr_to_write = LONG_MAX;
+
root = list_first_entry(&splice, struct btrfs_root,
delalloc_root);
root = btrfs_grab_root(root);
@@ -9489,9 +9516,9 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
- ret = start_delalloc_inodes(root, &nr, false);
+ ret = start_delalloc_inodes(root, &wbc, false);
btrfs_put_root(root);
- if (ret < 0)
+ if (ret < 0 || wbc.nr_to_write <= 0)
goto out;
spin_lock(&fs_info->delalloc_root_lock);
}
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 64099565ab8f..b43777c886d4 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -532,7 +532,9 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
loops = 0;
while ((delalloc_bytes || dio_bytes) && loops < 3) {
- btrfs_start_delalloc_roots(fs_info, items);
+ u64 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
+
+ btrfs_start_delalloc_roots(fs_info, nr_pages);
loops++;
if (wait_ordered && !trans) {
--
2.29.2
This is a note to let you know that I've just added the patch titled
xhci: make sure TRB is fully written before giving it to the controller
to my usb git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
in the usb-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
>From 576667bad341516edc4e18eb85acb0a2b4c9c9d9 Mon Sep 17 00:00:00 2001
From: Mathias Nyman <mathias.nyman(a)linux.intel.com>
Date: Fri, 15 Jan 2021 18:19:06 +0200
Subject: xhci: make sure TRB is fully written before giving it to the
controller
Once the command ring doorbell is rung, the xHC controller will parse all
command TRBs on the command ring that have the cycle bit set properly.
If the driver has just started writing the next command TRB to the ring
when the hardware finishes the previous TRB, the HW might fetch an
incomplete TRB as long as its cycle bit is set correctly.
A command TRB is 16 bytes (128 bits) long.
The driver writes the command TRB in four 32-bit chunks, with the chunk
containing the cycle bit last. This does not, however, guarantee that the
chunks are actually written in that order.
This was detected in stress testing when canceling URBs with several
connected USB devices.
Two consecutive "Set TR Dequeue pointer" commands got queued right
after each other, and the second one was only partially written when
the controller parsed it, causing the dequeue pointer to be set
to bogus values. This was seen as error messages:
"Mismatch between completed Set TR Deq Ptr command & xHCI internal state"
The solution is to add a write memory barrier before writing the cycle bit.
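[ Illustration, not part of the patch: the same ordering rule applies to any descriptor that hardware consumes based on an ownership/cycle bit; write everything else, barrier, then flip the bit. A sketch with a made-up descriptor layout; the real change is in queue_trb() below: ]

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

/* Hypothetical 16-byte descriptor; word[3] carries the cycle/ownership bit. */
struct demo_desc {
	__le32 word[4];
};

static void demo_publish(struct demo_desc *d, u32 w0, u32 w1, u32 w2, u32 w3)
{
	d->word[0] = cpu_to_le32(w0);
	d->word[1] = cpu_to_le32(w1);
	d->word[2] = cpu_to_le32(w2);
	/* Make the first three words visible before the word holding the
	 * cycle bit, so the controller can never fetch a half-written entry.
	 */
	wmb();
	d->word[3] = cpu_to_le32(w3);
}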
Cc: <stable(a)vger.kernel.org>
Tested-by: Ross Zwisler <zwisler(a)google.com>
Signed-off-by: Mathias Nyman <mathias.nyman(a)linux.intel.com>
Link: https://lore.kernel.org/r/20210115161907.2875631-2-mathias.nyman@linux.inte…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/usb/host/xhci-ring.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5677b81c0915..cf0c93a90200 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2931,6 +2931,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
--
2.30.0
From version 2.35, binutils can be configured with
--enable-mips-fix-loongson3-llsc=yes, which means it defaults to
-mfix-loongson3-llsc. This breaks labels which might then point at the
wrong instruction.
The workaround of explicitly passing -mno-fix-loongson3-llsc was added in
Linux 5.1, but it is only enabled when building a Loongson
64 kernel. As vendors might use a common toolchain for building Loongson
and non-Loongson kernels, just move that workaround to
arch/mips/Makefile. At the same time update the comments to reflect the
current status.
Cc: stable(a)vger.kernel.org # 5.1+
Cc: YunQiang Su <syq(a)debian.org>
Signed-off-by: Aurelien Jarno <aurelien(a)aurel32.net>
---
arch/mips/Makefile | 19 +++++++++++++++++++
arch/mips/loongson64/Platform | 22 ----------------------
2 files changed, 19 insertions(+), 22 deletions(-)
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index cd4343edeb11..5ffdd67093bc 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -136,6 +136,25 @@ cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
#
cflags-y += -fno-stack-check
+# binutils from v2.35 when built with --enable-mips-fix-loongson3-llsc=yes,
+# supports an -mfix-loongson3-llsc flag which emits a sync prior to each ll
+# instruction to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h
+# for a description).
+#
+# We disable this in order to prevent the assembler meddling with the
+# instruction that labels refer to, ie. if we label an ll instruction:
+#
+# 1: ll v0, 0(a0)
+#
+# ...then with the assembler fix applied the label may actually point at a sync
+# instruction inserted by the assembler, and if we were using the label in an
+# exception table the table would no longer contain the address of the ll
+# instruction.
+#
+# Avoid this by explicitly disabling that assembler behaviour.
+#
+cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+
#
# CPU-dependent compiler/assembler options for optimization.
#
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index ec42c5085905..e2354e128d9a 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -5,28 +5,6 @@
cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
-#
-# Some versions of binutils, not currently mainline as of 2019/02/04, support
-# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
-# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
-# description).
-#
-# We disable this in order to prevent the assembler meddling with the
-# instruction that labels refer to, ie. if we label an ll instruction:
-#
-# 1: ll v0, 0(a0)
-#
-# ...then with the assembler fix applied the label may actually point at a sync
-# instruction inserted by the assembler, and if we were using the label in an
-# exception table the table would no longer contain the address of the ll
-# instruction.
-#
-# Avoid this by explicitly disabling that assembler behaviour. If upstream
-# binutils does not merge support for the flag then we can revisit & remove
-# this later - for now it ensures vendor toolchains don't cause problems.
-#
-cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
-
#
# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
# as MIPS64 R2; older versions as just R1. This leaves the possibility open
--
2.29.2
This series hunts down the problems discovered after manually enabling
ARCH_WANT_LD_ORPHAN_WARN. Notably:
- adds the missing PAGE_ALIGNED_DATA() section affecting VDSO
placement (marked for stable);
- stops blind catching of orphan text sections with .text.*
directive;
- properly stops .eh_frame section generation.
Compile- and runtime-tested on a MIPS32R2 CPS board with no issues
using two different toolkits:
- Binutils 2.35.1, GCC 10.2.1 (with Alpine patches);
- LLVM stack: 11.0.0 and from latest Git snapshot.
Since v4 [3]:
- new: drop redundant .text.cps-vec creation and blind inclusion
of orphan text sections via .text.* directive in vmlinux.lds.S;
- don't assert SIZEOF(.rel.dyn) as it's reported that it may not be
empty on certain machines and compilers (Thomas);
- align GOT table like it's done for ARM64;
- new: catch UBSAN's "unnamed data" sections in generic definitions
when building with LD_DEAD_CODE_DATA_ELIMINATION;
- collect Reviewed-bys (Kees, Nathan).
Since v3 [2]:
- fix the third patch as GNU stack emits .rel.dyn into VDSO for
some reason if .cfi_sections is specified.
Since v2 [1]:
- stop discarding .eh_frame and just prevent it from generating
(Kees);
- drop redundant sections assertions (Fangrui);
- place GOT table in .text instead of asserting as it's not empty
when building with LLVM (Nathan);
- catch compound literals in generic definitions when building with
LD_DEAD_CODE_DATA_ELIMINATION (Kees);
- collect two Reviewed-bys (Kees).
Since v1 [0]:
- catch .got entries too as LLD may produce it (Nathan);
- check for unwanted sections to be zero-sized instead of
discarding (Fangrui).
[0] https://lore.kernel.org/linux-mips/20210104121729.46981-1-alobakin@pm.me
[1] https://lore.kernel.org/linux-mips/20210106200713.31840-1-alobakin@pm.me
[2] https://lore.kernel.org/linux-mips/20210107115120.281008-1-alobakin@pm.me
[3] https://lore.kernel.org/linux-mips/20210107123331.354075-1-alobakin@pm.me
Alexander Lobakin (9):
MIPS: vmlinux.lds.S: add missing PAGE_ALIGNED_DATA() section
MIPS: CPS: don't create redundant .text.cps-vec section
MIPS: vmlinux.lds.S: add ".gnu.attributes" to DISCARDS
MIPS: properly stop .eh_frame generation
MIPS: vmlinux.lds.S: explicitly catch .rel.dyn symbols
MIPS: vmlinux.lds.S: explicitly declare .got table
vmlinux.lds.h: catch compound literals into data and BSS
vmlinux.lds.h: catch UBSAN's "unnamed data" into data
MIPS: select ARCH_WANT_LD_ORPHAN_WARN
arch/mips/Kconfig | 1 +
arch/mips/include/asm/asm.h | 18 ++++++++++++++++++
arch/mips/kernel/cps-vec.S | 1 -
arch/mips/kernel/vmlinux.lds.S | 11 +++++++++--
include/asm-generic/vmlinux.lds.h | 6 +++---
5 files changed, 31 insertions(+), 6 deletions(-)
--
2.30.0
page_huge_active() can be called from scan_movable_pages(), which does
not hold a reference count on the HugeTLB page. So when we call
page_huge_active() from scan_movable_pages(), the HugeTLB page can be
freed in parallel, and we will then trigger the BUG_ON in
page_huge_active() when CONFIG_DEBUG_VM is enabled. Just remove the
VM_BUG_ON_PAGE.
Fixes: 7e1f049efb86 ("mm: hugetlb: cleanup using paeg_huge_active()")
Signed-off-by: Muchun Song <songmuchun(a)bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Cc: stable(a)vger.kernel.org
---
mm/hugetlb.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f2cef3cf1f85..2d671f3743f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1360,8 +1360,7 @@ struct hstate *size_to_hstate(unsigned long size)
*/
bool page_huge_active(struct page *page)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- return PageHead(page) && PagePrivate(&page[1]);
+ return PageHeadHuge(page) && PagePrivate(&page[1]);
}
/* never called for tail page */
--
2.11.0
There is a race between isolate_huge_page() and __free_huge_page().
CPU0:                           CPU1:

if (PageHuge(page))
                                put_page(page)
                                  __free_huge_page(page)
                                      spin_lock(&hugetlb_lock)
                                      update_and_free_page(page)
                                        set_compound_page_dtor(page,
                                          NULL_COMPOUND_DTOR)
                                      spin_unlock(&hugetlb_lock)
isolate_huge_page(page)
  // trigger BUG_ON
  VM_BUG_ON_PAGE(!PageHead(page), page)
  spin_lock(&hugetlb_lock)
  page_huge_active(page)
    // trigger BUG_ON
    VM_BUG_ON_PAGE(!PageHuge(page), page)
  spin_unlock(&hugetlb_lock)
We isolate the HugeTLB page on CPU0 while, in parallel, it is freed to
the buddy allocator on CPU1. We can then trigger a BUG_ON on CPU0,
because the page has already been freed to the buddy allocator.
Fixes: c8721bbbdd36 ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Signed-off-by: Muchun Song <songmuchun(a)bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Cc: stable(a)vger.kernel.org
---
mm/hugetlb.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b99fe4a2b435..f2cef3cf1f85 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5572,9 +5572,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
{
bool ret = true;
- VM_BUG_ON_PAGE(!PageHead(page), page);
spin_lock(&hugetlb_lock);
- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+ if (!PageHeadHuge(page) || !page_huge_active(page) ||
+ !get_page_unless_zero(page)) {
ret = false;
goto unlock;
}
--
2.11.0
commit ad4fddef5f2345aa9214e979febe2f47639c10d9 upstream.
When building mips tinyconfig with clang, the following warning shows up:
WARNING: modpost: vmlinux.o(.text+0x1940c): Section mismatch in reference from the function r4k_cache_init() to the function .init.text:loongson3_sc_init()
The function r4k_cache_init() references
the function __init loongson3_sc_init().
This is often because r4k_cache_init lacks a __init
annotation or the annotation of loongson3_sc_init is wrong.
Remove the __init annotation from loongson3_sc_init(),
mips_sc_probe_cm3(), and mips_sc_probe().
Cc: <stable(a)vger.kernel.org> # v5.4+
Signed-off-by: Anders Roxell <anders.roxell(a)linaro.org>
Reviewed-by: Nick Desaulniers <ndesaulniers(a)google.com>
Signed-off-by: Thomas Bogendoerfer <tsbogend(a)alpha.franken.de>
---
arch/mips/mm/c-r4k.c | 2 +-
arch/mips/mm/sc-mips.c | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 9cede7ce37e6..c9644c38ec28 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1609,7 +1609,7 @@ static void __init loongson2_sc_init(void)
c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
-static void __init loongson3_sc_init(void)
+static void loongson3_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config2, lsize;
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index dd0a5becaabd..06ec304ad4d1 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -146,7 +146,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
return 1;
}
-static int __init mips_sc_probe_cm3(void)
+static int mips_sc_probe_cm3(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned long cfg = read_gcr_l2_config();
@@ -180,7 +180,7 @@ static int __init mips_sc_probe_cm3(void)
return 0;
}
-static inline int __init mips_sc_probe(void)
+static inline int mips_sc_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config1, config2;
--
2.29.2
If a new hugetlb page is allocated during fallocate, it will not be
marked as active (set_page_huge_active), which will result in a later
isolate_huge_page failure when the page migration code would like to
move that page. Such a failure would be unexpected and wrong.
Only export set_page_huge_active; leave clear_page_huge_active static,
because it has no external users.
Fixes: 70c3547e36f5 ("hugetlbfs: add hugetlbfs_fallocate()")
Signed-off-by: Muchun Song <songmuchun(a)bytedance.com>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Reviewed-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Reviewed-by: Oscar Salvador <osalvador(a)suse.de>
Cc: stable(a)vger.kernel.org
---
fs/hugetlbfs/inode.c | 3 ++-
include/linux/hugetlb.h | 2 ++
mm/hugetlb.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index b5c109703daa..21c20fd5f9ee 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -735,9 +735,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ set_page_huge_active(page);
/*
* unlock_page because locked by add_to_page_cache()
- * page_put due to reference from alloc_huge_page()
+ * put_page() due to reference from alloc_huge_page()
*/
unlock_page(page);
put_page(page);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ebca2ef02212..b5807f23caf8 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -770,6 +770,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
+void set_page_huge_active(struct page *page);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1f3bf1710b66..4741d60f8955 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1348,7 +1348,7 @@ bool page_huge_active(struct page *page)
}
/* never called for tail page */
-static void set_page_huge_active(struct page *page)
+void set_page_huge_active(struct page *page)
{
VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
SetPagePrivate(&page[1]);
--
2.11.0
The patch below does not apply to the 5.10-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From bee749b187ac57d1faf00b2ab356ff322230fce8 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence(a)gmail.com>
Date: Wed, 25 Nov 2020 02:19:23 +0000
Subject: [PATCH] io_uring: fix files cancellation
io_uring_cancel_files()'s task check condition mistakenly got flipped.
1. There can't be a request in the inflight list without
IO_WQ_WORK_FILES, so kill this check to keep the whole condition simpler.
2. Also, don't call the function for files==NULL to avoid such a check;
all that stuff is already handled well by its counterpart,
__io_uring_cancel_task_requests().
With that, just flip the task check.
Also, it io-wq-cancels all requests of the current task there, so don't
forget to set the right ->files in struct io_task_cancel.
Fixes: c1973b38bf639 ("io_uring: cancel only requests of current task")
Reported-by: syzbot+c0d52d0b3c0c3ffb9525(a)syzkaller.appspotmail.com
Signed-off-by: Pavel Begunkov <asml.silence(a)gmail.com>
Signed-off-by: Jens Axboe <axboe(a)kernel.dk>
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e66888d45778..f47de27e5125 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8688,15 +8688,14 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
while (!list_empty_careful(&ctx->inflight_list)) {
- struct io_task_cancel cancel = { .task = task, .files = NULL, };
+ struct io_task_cancel cancel = { .task = task, .files = files };
struct io_kiocb *req;
DEFINE_WAIT(wait);
bool found = false;
spin_lock_irq(&ctx->inflight_lock);
list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
- if (req->task == task &&
- (req->work.flags & IO_WQ_WORK_FILES) &&
+ if (req->task != task ||
req->work.identity->files != files)
continue;
found = true;
@@ -8768,10 +8767,11 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
io_cancel_defer_files(ctx, task, files);
io_cqring_overflow_flush(ctx, true, task, files);
- io_uring_cancel_files(ctx, task, files);
if (!files)
__io_uring_cancel_task_requests(ctx, task);
+ else
+ io_uring_cancel_files(ctx, task, files);
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
atomic_dec(&task->io_uring->in_idle);
The patch below does not apply to the 5.10-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From 4aa84f2ffa81f71e15e5cffc2cc6090dbee78f8e Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence(a)gmail.com>
Date: Thu, 7 Jan 2021 03:15:42 +0000
Subject: [PATCH] io_uring: dont kill fasync under completion_lock
CPU0                    CPU1
----                    ----
lock(&new->fa_lock);
                        local_irq_disable();
                        lock(&ctx->completion_lock);
                        lock(&new->fa_lock);
<Interrupt>
  lock(&ctx->completion_lock);

 *** DEADLOCK ***
Move kill_fasync() out of io_commit_cqring() into io_cqring_ev_posted(),
so it is not called while holding completion_lock. That saves us from the
reported deadlock, and it's just nice to shorten the locking time and
untangle nested locks (compl_lock -> wq_head::lock).
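[ Illustration, not part of the patch: the general pattern is to commit the event under the lock and do the waitqueue/fasync signalling only after dropping it. A minimal sketch with made-up names, not io_uring code: ]

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Hypothetical event context; mirrors the shape of the fix, not io_uring. */
struct demo_ctx {
	spinlock_t		lock;
	wait_queue_head_t	wait;
	struct fasync_struct	*fasync;
	u32			tail;
};

static void demo_post_event(struct demo_ctx *ctx)
{
	spin_lock_irq(&ctx->lock);
	ctx->tail++;			/* commit the event under the lock */
	spin_unlock_irq(&ctx->lock);

	/* Signal only after the lock is dropped: no nesting with fa_lock,
	 * and the critical section stays short.
	 */
	if (wq_has_sleeper(&ctx->wait))
		wake_up_interruptible(&ctx->wait);
	kill_fasync(&ctx->fasync, SIGIO, POLL_IN);
}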
Reported-by: syzbot+91ca3f25bd7f795f019c(a)syzkaller.appspotmail.com
Signed-off-by: Pavel Begunkov <asml.silence(a)gmail.com>
Signed-off-by: Jens Axboe <axboe(a)kernel.dk>
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 91e517ad1421..401316fe2ae2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1345,11 +1345,6 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
/* order cqe stores with ring update */
smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-
- if (wq_has_sleeper(&ctx->cq_wait)) {
- wake_up_interruptible(&ctx->cq_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
- }
}
static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
@@ -1711,6 +1706,10 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
wake_up(&ctx->sq_data->wait);
if (io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
+ if (wq_has_sleeper(&ctx->cq_wait)) {
+ wake_up_interruptible(&ctx->cq_wait);
+ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+ }
}
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1721,6 +1720,10 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
}
if (io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
+ if (wq_has_sleeper(&ctx->cq_wait)) {
+ wake_up_interruptible(&ctx->cq_wait);
+ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+ }
}
/* Returns true if there are no backlogged entries after the flush */
The patch below does not apply to the 5.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From 690d8b917bbe64772cb0b652311bcd50908aea6b Mon Sep 17 00:00:00 2001
From: Douglas Anderson <dianders(a)chromium.org>
Date: Thu, 17 Dec 2020 14:29:12 -0800
Subject: [PATCH] spi: spi-geni-qcom: Fail new xfers if xfer/cancel/abort
pending
If we got a timeout when trying to send an abort command then it means
that we just got 3 timeouts in a row:
1. The original timeout that caused handle_fifo_timeout() to be
called.
2. A one second timeout waiting for the cancel command to finish.
3. A one second timeout waiting for the abort command to finish.
SPI is clocked by the controller, so nothing (aside from a hardware
fault or a totally broken sequencer) should be causing the actual
commands to fail in hardware. However, even though the hardware
itself is not expected to fail (and it'd be hard to predict how we
should handle things if it did), it's easy to hit the timeout case by
simply blocking our interrupt handler from running for a long period
of time. Obviously the system is in pretty bad shape if an interrupt
handler is blocked for > 2 seconds, but there are certainly bugs (even
bugs in other unrelated drivers) that can make this happen.
Let's make things a bit more robust against this case. If we fail to
abort we'll set a flag and then we'll block all future transfers until
we have no more interrupts pending.
Fixes: 561de45f72bd ("spi: spi-geni-qcom: Add SPI driver support for GENI based QUP")
Signed-off-by: Douglas Anderson <dianders(a)chromium.org>
Reviewed-by: Stephen Boyd <swboyd(a)chromium.org>
Link: https://lore.kernel.org/r/20201217142842.v3.2.Ibade998ed587e070388b4bf58801…
Signed-off-by: Mark Brown <broonie(a)kernel.org>
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 096edfbde451..32c053705dec 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -83,6 +83,7 @@ struct spi_geni_master {
spinlock_t lock;
int irq;
bool cs_flag;
+ bool abort_failed;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
- if (!time_left)
+ if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+ /*
+ * No need for a lock since SPI core has a lock and we never
+ * access this from an interrupt.
+ */
+ mas->abort_failed = true;
+ }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ if (!mas->abort_failed)
+ return false;
+
+ /*
+ * The only known case where a transfer times out and then a cancel
+ * times out then an abort times out is if something is blocking our
+ * interrupt handler from running. Avoid starting any new transfers
+ * until that sorts itself out.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+ m_irq & m_irq_en);
+ return true;
+ }
+
+ /*
+ * If we're here the problem resolved itself so no need to check more
+ * on future transfers.
+ */
+ mas->abort_failed = false;
+
+ return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,9 +200,15 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
+ pm_runtime_get_sync(mas->dev);
+
+ if (spi_geni_is_abort_still_pending(mas)) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ goto exit;
+ }
+
mas->cs_flag = set_flag;
- pm_runtime_get_sync(mas->dev);
spin_lock_irq(&mas->lock);
reinit_completion(&mas->cs_done);
if (set_flag)
@@ -173,6 +221,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (!time_left)
handle_fifo_timeout(spi, NULL);
+exit:
pm_runtime_put(mas->dev);
}
@@ -280,6 +329,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -509,6 +561,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
Please apply upstream commit 8f56bc4dc101 ("ionic: start queues before
announcing link up") to 5.10.
When initially discussed, the implied race condition was still
theoretical. Since then, a case has been found that triggers the race and
causes a failure to bring up the netdev interface: with the device in a
bond, the bond driver tries to bring up the device on the
netif_carrier_on() notification before the queues have been started.
This should be applied to 5.10 stable.
Thanks,
sln
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From 3f9bce7a22a3f8ac9d885c9d75bc45569f24ac8b Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo(a)kernel.org>
Date: Sat, 14 Nov 2020 19:39:05 +0100
Subject: [PATCH] iio: imu: st_lsm6dsx: fix edge-trigger interrupts
If we are using edge IRQs, new samples can arrive while processing the
current interrupt, since there are no hw guarantees that the irq line
stays "low" long enough to properly detect the new interrupt.
In this case the new sample will be missed.
Polling the FIFO status register in the st_lsm6dsx_handler_thread routine
allows us to read new samples even if the interrupt arrives while
processing previous data and the timeslot where the line is "low"
is too short to be properly detected.
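[ Illustration, not part of the patch: the fix is the usual "drain until empty" loop for edge-triggered IRQ threads. A sketch under assumed names; read_fifo() is hypothetical: ]

#include <linux/interrupt.h>
#include <linux/mutex.h>

/* Hypothetical device context and FIFO reader, for illustration only. */
struct demo_hw {
	struct mutex fifo_lock;
	int (*read_fifo)(struct demo_hw *hw);	/* samples read, < 0 on error */
};

static irqreturn_t demo_irq_thread(int irq, void *private)
{
	struct demo_hw *hw = private;
	int total = 0, len;

	/* With edge IRQs there is no guarantee the line stays deasserted long
	 * enough for a new edge to be latched, so drain the FIFO until a read
	 * returns nothing instead of reading it exactly once.
	 */
	do {
		mutex_lock(&hw->fifo_lock);
		len = hw->read_fifo(hw);
		mutex_unlock(&hw->fifo_lock);

		if (len > 0)
			total += len;
	} while (len > 0);

	return total ? IRQ_HANDLED : IRQ_NONE;
}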
Fixes: 89ca88a7cdf2 ("iio: imu: st_lsm6dsx: support active-low interrupts")
Fixes: 290a6ce11d93 ("iio: imu: add support to lsm6dsx driver")
Signed-off-by: Lorenzo Bianconi <lorenzo(a)kernel.org>
Link: https://lore.kernel.org/r/5e93cda7dc1e665f5685c53ad8e9ea71dbae782d.16053788…
Cc: <Stable(a)vger.kernel.org>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 467214e2e77c..7cedaab096a7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2069,19 +2069,35 @@ st_lsm6dsx_report_motion_event(struct st_lsm6dsx_hw *hw)
static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
{
struct st_lsm6dsx_hw *hw = private;
+ int fifo_len = 0, len;
bool event;
- int count;
event = st_lsm6dsx_report_motion_event(hw);
if (!hw->settings->fifo_ops.read_fifo)
return event ? IRQ_HANDLED : IRQ_NONE;
- mutex_lock(&hw->fifo_lock);
- count = hw->settings->fifo_ops.read_fifo(hw);
- mutex_unlock(&hw->fifo_lock);
+ /*
+ * If we are using edge IRQs, new samples can arrive while
+ * processing current interrupt since there are no hw
+ * guarantees the irq line stays "low" long enough to properly
+ * detect the new interrupt. In this case the new sample will
+ * be missed.
+ * Polling FIFO status register allow us to read new
+ * samples even if the interrupt arrives while processing
+ * previous data and the timeslot where the line is "low" is
+ * too short to be properly detected.
+ */
+ do {
+ mutex_lock(&hw->fifo_lock);
+ len = hw->settings->fifo_ops.read_fifo(hw);
+ mutex_unlock(&hw->fifo_lock);
+
+ if (len > 0)
+ fifo_len += len;
+ } while (len > 0);
- return count || event ? IRQ_HANDLED : IRQ_NONE;
+ return fifo_len || event ? IRQ_HANDLED : IRQ_NONE;
}
static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From 20f1431160c6b590cdc269a846fc5a448abf5b98 Mon Sep 17 00:00:00 2001
From: Richard Weinberger <richard(a)nod.at>
Date: Mon, 16 Nov 2020 22:05:30 +0100
Subject: [PATCH] ubifs: wbuf: Don't leak kernel memory to flash
Write buffers use a kmalloc()'ed buffer; they can leak up to seven bytes
of kernel memory to flash if writes are not aligned.
So use ubifs_pad() to fill these gaps with padding bytes.
This was never a problem while scanning because the scanner logic
manually aligns node lengths and skips over these gaps.
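[ Illustration, not part of the patch: whenever the written length is rounded up for alignment, the rounded-up tail must be filled with padding instead of stale kmalloc() contents. A tiny sketch of that idea; the 0xCE pad byte is an assumption, and UBIFS itself uses ubifs_pad(): ]

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustration only: copy @len bytes into the write-buffer and pad the
 * 8-byte aligned tail so no uninitialized kernel memory reaches the flash.
 * Real UBIFS code calls ubifs_pad(), which can also emit a padding node.
 */
static void demo_fill_wbuf(u8 *wbuf, const void *buf, int len)
{
	int aligned_len = ALIGN(len, 8);

	memcpy(wbuf, buf, len);
	if (aligned_len > len)
		memset(wbuf + len, 0xCE, aligned_len - len);
}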
Cc: <stable(a)vger.kernel.org>
Fixes: 1e51764a3c2ac05a2 ("UBIFS: add new flash file system")
Signed-off-by: Richard Weinberger <richard(a)nod.at>
Reviewed-by: Zhihao Cheng <chengzhihao1(a)huawei.com>
Signed-off-by: Richard Weinberger <richard(a)nod.at>
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 2dc933f73165..a9cabb3fa64c 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -319,7 +319,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
uint32_t crc;
- ubifs_assert(c, pad >= 0 && !(pad & 7));
+ ubifs_assert(c, pad >= 0);
if (pad >= UBIFS_PAD_NODE_SZ) {
struct ubifs_ch *ch = buf;
@@ -764,6 +764,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
* write-buffer.
*/
memcpy(wbuf->buf + wbuf->used, buf, len);
+ if (aligned_len > len) {
+ ubifs_assert(c, aligned_len - len < 8);
+ ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
+ }
if (aligned_len == wbuf->avail) {
dbg_io("flush jhead %s wbuf to LEB %d:%d",
@@ -856,13 +860,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
}
spin_lock(&wbuf->lock);
- if (aligned_len)
+ if (aligned_len) {
/*
* And now we have what's left and what does not take whole
* max. write unit, so write it to the write-buffer and we are
* done.
*/
memcpy(wbuf->buf, buf + written, len);
+ if (aligned_len > len) {
+ ubifs_assert(c, aligned_len - len < 8);
+ ubifs_pad(c, wbuf->buf + len, aligned_len - len);
+ }
+ }
if (c->leb_size - wbuf->offs >= c->max_write_size)
wbuf->size = c->max_write_size;
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From 5626308bb94d9f930aa5f7c77327df4c6daa7759 Mon Sep 17 00:00:00 2001
From: Lukas Wunner <lukas(a)wunner.de>
Date: Mon, 7 Dec 2020 09:17:05 +0100
Subject: [PATCH] spi: pxa2xx: Fix use-after-free on unbind
pxa2xx_spi_remove() accesses the driver's private data after calling
spi_unregister_controller() even though that function releases the last
reference on the spi_controller and thereby frees the private data.
Fix by switching over to the new devm_spi_alloc_master/slave() helper
which keeps the private data accessible until the driver has unbound.
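[ Illustration, not part of the patch: devm_spi_alloc_master/slave() tie the controller, and the private data embedded in it, to the device's lifetime, so remove() can still use it and probe error paths need no spi_controller_put(). A hedged probe() skeleton with made-up driver data, not the pxa2xx code: ]

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct demo_drv_data {
	struct spi_controller *controller;	/* plus whatever the driver needs */
};

static int demo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct demo_drv_data *data;

	/* Released only when the driver unbinds, so it stays valid in remove(). */
	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*data));
	if (!ctlr)
		return -ENOMEM;

	data = spi_controller_get_devdata(ctlr);
	data->controller = ctlr;
	platform_set_drvdata(pdev, data);

	/* ... hardware setup; on error simply return, no spi_controller_put() ... */

	return devm_spi_register_controller(&pdev->dev, ctlr);
}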
Fixes: 32e5b57232c0 ("spi: pxa2xx: Fix controller unregister order")
Signed-off-by: Lukas Wunner <lukas(a)wunner.de>
Cc: <stable(a)vger.kernel.org> # v2.6.17+: 5e844cc37a5c: spi: Introduce device-managed SPI controller allocation
Cc: <stable(a)vger.kernel.org> # v2.6.17+: 32e5b57232c0: spi: pxa2xx: Fix controller unregister order
Cc: <stable(a)vger.kernel.org> # v2.6.17+
Link: https://lore.kernel.org/r/5764b04d4a6e43069ebb7808f64c2f774ac6f193.16072868…
Signed-off-by: Mark Brown <broonie(a)kernel.org>
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 62a0f0f86553..bd2354fd438d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1691,9 +1691,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
if (platform_info->is_slave)
- controller = spi_alloc_slave(dev, sizeof(struct driver_data));
+ controller = devm_spi_alloc_slave(dev, sizeof(*drv_data));
else
- controller = spi_alloc_master(dev, sizeof(struct driver_data));
+ controller = devm_spi_alloc_master(dev, sizeof(*drv_data));
if (!controller) {
dev_err(&pdev->dev, "cannot alloc spi_controller\n");
@@ -1916,7 +1916,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
free_irq(ssp->irq, drv_data);
out_error_controller_alloc:
- spi_controller_put(controller);
pxa_ssp_free(ssp);
return status;
}
The patch below does not apply to the 4.19-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From 0e53656ad8abc99e0a80c3de611e593ebbf55829 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris(a)chris-wilson.co.uk>
Date: Wed, 16 Dec 2020 09:29:51 +0000
Subject: [PATCH] drm/i915: Fix mismatch between misplaced vma check and vma
insert
When inserting a VMA, we restrict the placement to the low 4G unless the
caller opts into using the full range. This was done to allow userspace
the opportunity to transition slowly from a 32b address space, and to
avoid breaking inherent 32b assumptions of some commands.
However, for insert we limited ourselves to 4G-4K, while on verification
we allowed the full 4G. This causes some attempts to bind a new buffer
to fail sporadically with -ENOSPC, while at other times the buffer is
bound successfully.
commit 48ea1e32c39d ("drm/i915/gen9: Set PIN_ZONE_4G end to 4GB - 1
page") suggests that there is a genuine problem with stateless addressing
that cannot utilize the last page in 4G and so we purposefully excluded
it. This means that the quick pin pass may cause us to utilize a buggy
placement.
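[ Illustration, not part of the patch: a standalone demo of the boundary arithmetic, showing a 4 KiB vma ending exactly at 4G that the old check accepted and the new check flags as misplaced: ]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 4 KiB vma whose last byte sits exactly at the 4G boundary - 1. */
	uint64_t start = 0xFFFFF000ULL, size = 0x1000ULL;

	/* Old check: below 4G, so verification accepted the placement... */
	printf("old misplaced: %llu\n",
	       (unsigned long long)((start + size - 1) >> 32));	/* prints 0 */

	/* ...but insert only allows up to 4G - 4K, so flag it as misplaced. */
	printf("new misplaced: %llu\n",
	       (unsigned long long)((start + size + 4095) >> 32));	/* prints 1 */

	return 0;
}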
Reported-by: CQ Tang <cq.tang(a)intel.com>
Testcase: igt/gem_exec_params/larger-than-life-batch
Fixes: 48ea1e32c39d ("drm/i915/gen9: Set PIN_ZONE_4G end to 4GB - 1 page")
Signed-off-by: Chris Wilson <chris(a)chris-wilson.co.uk>
Cc: CQ Tang <cq.tang(a)intel.com>
Reviewed-by: CQ Tang <cq.tang(a)intel.com>
Reviewed-by: Matthew Auld <matthew.auld(a)intel.com>
Cc: <stable(a)vger.kernel.org> # v4.5+
Link: https://patchwork.freedesktop.org/patch/msgid/20201216092951.7124-1-chris@c…
(cherry picked from commit 5f22cc0b134ab702d7f64b714e26018f7288ffee)
Signed-off-by: Jani Nikula <jani.nikula(a)intel.com>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b07dc1156a0e..bcc80f428172 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -382,7 +382,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size - 1) >> 32)
+ (vma->node.start + vma->node.size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
Hi Greg and Sasha,
Please apply the attached backports of commit
eff8728fe698 ("vmlinux.lds.h: Add PGO and AutoFDO input sections") to
4.4 through 5.4. I tried to make the file names obvious. This fixes a
boot failure on ARCH=arm with LLVM 12.
Cheers,
Nathan
The staging/exfat driver has departed, but a lot of distros are still tracking
5.4-stable, so we should fix this.
There was a 0/1 offset error in month handling for file metadata, causing
the month to get incremented on each reference to the file.
Thanks to Sebastian Gurtler for troubleshooting this, and Arpad Mueller
for bringing it to my attention.
Relevant discussions:
https://bugzilla.kernel.org/show_bug.cgi?id=210997
https://bugs.launchpad.net/ubuntu/+source/ubuntu-meta/+bug/1872504
Signed-off-by: Valdis Kletnieks <valdis.kletnieks(a)vt.edu>
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 3b2b0ceb7297..848258daf620 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -59,7 +59,7 @@ static void exfat_write_super(struct super_block *sb);
/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
static void exfat_time_fat2unix(struct timespec64 *ts, struct date_time_t *tp)
{
- ts->tv_sec = mktime64(tp->Year + 1980, tp->Month + 1, tp->Day,
+ ts->tv_sec = mktime64(tp->Year + 1980, tp->Month, tp->Day,
tp->Hour, tp->Minute, tp->Second);
ts->tv_nsec = tp->MilliSecond * NSEC_PER_MSEC;
Please consider applying the following patch for LLVM_IAS=1 support
for Android's i386 cuttlefish virtual device:
commit 78762b0e79bc ("x86/asm/32: Add ENDs to some functions and
relabel with SYM_CODE_*")
which first landed in v5.5-rc1.
There was a small conflict in arch/x86/xen/xen-asm_32.S due to the
order of backports that was trivial to resolve. You can see the list
of commits to it in linux-5.7.y:
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/log/arch/x…
as arch/x86/xen/xen-asm_32.S was removed in
commit a13f2ef168cb ("x86/xen: remove 32-bit Xen PV guest support") in v5.9-rc1.
--
Thanks,
~Nick Desaulniers
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From 7a68d725e4ea384977445e0bcaed3d7de83ab5b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jouni=20K=2E=20Sepp=C3=A4nen?= <jks(a)iki.fi>
Date: Tue, 5 Jan 2021 06:52:49 +0200
Subject: [PATCH] net: cdc_ncm: correct overhead in delayed_ndp_size
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Aligning to tx_ndp_modulus is not sufficient because the next align
call can be cdc_ncm_align_tail, which can add up to ctx->tx_modulus +
ctx->tx_remainder - 1 bytes. This used to lead to occasional crashes
on a Huawei 909s-120 LTE module as follows:
- the condition marked /* if there is a remaining skb [...] */ is true
so the swaps happen
- skb_out is set from ctx->tx_curr_skb
- skb_out->len is exactly 0x3f52
- ctx->tx_curr_size is 0x4000 and delayed_ndp_size is 0xac
(note that the sum of skb_out->len and delayed_ndp_size is 0x3ffe)
- the for loop over n is executed once
- the cdc_ncm_align_tail call marked /* align beginning of next frame */
increases skb_out->len to 0x3f56 (the sum is now 0x4002)
- the condition marked /* check if we had enough room left [...] */ is
false so we break out of the loop
- the condition marked /* If requested, put NDP at end of frame. */ is
true so the NDP is written into skb_out
- now skb_out->len is 0x4002, so padding_count is minus two interpreted
as an unsigned number, which is used as the length argument to memset,
leading to a crash with various symptoms but usually including
> Call Trace:
> <IRQ>
> cdc_ncm_fill_tx_frame+0x83a/0x970 [cdc_ncm]
> cdc_mbim_tx_fixup+0x1d9/0x240 [cdc_mbim]
> usbnet_start_xmit+0x5d/0x720 [usbnet]
The cdc_ncm_align_tail call first aligns on a ctx->tx_modulus
boundary (adding at most ctx->tx_modulus-1 bytes), then adds
ctx->tx_remainder bytes. Alternatively, the next alignment call can
occur in cdc_ncm_ndp16 or cdc_ncm_ndp32, in which case at most
ctx->tx_ndp_modulus-1 bytes are added.
A similar problem has occurred before, and the code is nontrivial to
reason about, so add a guard before the crashing call. By that time it
is too late to prevent any memory corruption (we'll have written past
the end of the buffer already) but we can at least try to get a warning
written into an on-disk log by avoiding the hard crash caused by padding
past the buffer with a huge number of zeros.
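[ Illustration, not part of the patch: the crash is plain unsigned wrap-around; once skb_out->len exceeds tx_curr_size, the subtraction becomes a huge value handed to skb_put_zero()/memset(). A standalone demo with the numbers from the report: ]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tx_curr_size = 0x4000;	/* NTB buffer size */
	uint32_t skb_len = 0x3f56;	/* 0x3f52 after aligning the next frame */
	uint32_t padding_count;

	skb_len += 0xac;		/* NDP appended at the end: 0x4002 > 0x4000 */

	/* "minus two" interpreted as unsigned: 4294967294 bytes of zero padding */
	padding_count = tx_curr_size - skb_len;
	printf("padding_count = %u\n", padding_count);
	printf("guard trips: %s\n",
	       padding_count > tx_curr_size ? "yes, WARN instead of crashing" : "no");

	return 0;
}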
Signed-off-by: Jouni K. Seppänen <jks(a)iki.fi>
Fixes: 4a0e3e989d66 ("cdc_ncm: Add support for moving NDP to end of NCM frame")
Link: https://bugzilla.kernel.org/show_bug.cgi?id=209407
Reported-by: kernel test robot <lkp(a)intel.com>
Reviewed-by: Bjørn Mork <bjorn(a)mork.no>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 3b816a4731f2..5a78848db93f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ delayed_ndp_size = ctx->max_ndp_size +
+ max_t(u32,
+ ctx->tx_ndp_modulus,
+ ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
- skb_put_zero(skb_out, padding_count);
+ if (!WARN_ON(padding_count > ctx->tx_curr_size))
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
The patch below does not apply to the 4.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From b41b554c1ee75070a14c02a88496b1f231c7eacc Mon Sep 17 00:00:00 2001
From: Julian Wiedmann <jwi(a)linux.ibm.com>
Date: Thu, 7 Jan 2021 18:24:41 +0100
Subject: [PATCH] s390/qeth: fix locking for discipline setup / removal
Due to insufficient locking, qeth_core_set_online() and
qeth_dev_layer2_store() can run in parallel, both attempting to load &
setup the discipline (and stepping on each other's toes along the way).
A similar race can also occur between qeth_core_remove_device() and
qeth_dev_layer2_store().
Access to .discipline is meant to be protected by the discipline_mutex,
so add/expand the locking in qeth_core_remove_device() and
qeth_core_set_online().
Adjust the locking in qeth_l*_remove_device() accordingly, as it's now
handled by the callers in a consistent manner.
Based on an initial patch by Ursula Braun.
Fixes: 9dc48ccc68b9 ("qeth: serialize sysfs-triggered device configurations")
Signed-off-by: Julian Wiedmann <jwi(a)linux.ibm.com>
Reviewed-by: Alexandra Winter <wintera(a)linux.ibm.com>
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d45e223fc521..cf18d87da41e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -6585,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
+ /* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
goto err_load;
@@ -6617,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
+ mutex_lock(&card->discipline_mutex);
if (card->discipline) {
card->discipline->remove(gdev);
qeth_core_free_discipline(card);
}
+ mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6635,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
+ mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
@@ -6648,11 +6652,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
}
}
- mutex_lock(&card->discipline_mutex);
rc = qeth_set_online(card, card->discipline);
- mutex_unlock(&card->discipline_mutex);
err:
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 37279b1e29f6..4254caf1d9b6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2207,11 +2207,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (gdev->state == CCWGROUP_ONLINE) {
- mutex_lock(&card->discipline_mutex);
+ if (gdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- mutex_unlock(&card->discipline_mutex);
- }
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8d474179ce98..6970597bc885 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1970,11 +1970,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (cgdev->state == CCWGROUP_ONLINE) {
- mutex_lock(&card->discipline_mutex);
+ if (cgdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- mutex_unlock(&card->discipline_mutex);
- }
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
>From b41b554c1ee75070a14c02a88496b1f231c7eacc Mon Sep 17 00:00:00 2001
From: Julian Wiedmann <jwi(a)linux.ibm.com>
Date: Thu, 7 Jan 2021 18:24:41 +0100
Subject: [PATCH] s390/qeth: fix locking for discipline setup / removal
Due to insufficient locking, qeth_core_set_online() and
qeth_dev_layer2_store() can run in parallel, both attempting to load &
setup the discipline (and stepping on each other's toes along the way).
A similar race can also occur between qeth_core_remove_device() and
qeth_dev_layer2_store().
Access to .discipline is meant to be protected by the discipline_mutex,
so add/expand the locking in qeth_core_remove_device() and
qeth_core_set_online().
Adjust the locking in qeth_l*_remove_device() accordingly, as it's now
handled by the callers in a consistent manner.
Based on an initial patch by Ursula Braun.
Fixes: 9dc48ccc68b9 ("qeth: serialize sysfs-triggered device configurations")
Signed-off-by: Julian Wiedmann <jwi(a)linux.ibm.com>
Reviewed-by: Alexandra Winter <wintera(a)linux.ibm.com>
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d45e223fc521..cf18d87da41e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -6585,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
+ /* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
goto err_load;
@@ -6617,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
+ mutex_lock(&card->discipline_mutex);
if (card->discipline) {
card->discipline->remove(gdev);
qeth_core_free_discipline(card);
}
+ mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6635,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
+ mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
@@ -6648,11 +6652,10 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
}
}
- mutex_lock(&card->discipline_mutex);
rc = qeth_set_online(card, card->discipline);
- mutex_unlock(&card->discipline_mutex);
err:
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 37279b1e29f6..4254caf1d9b6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2207,11 +2207,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (gdev->state == CCWGROUP_ONLINE) {
- mutex_lock(&card->discipline_mutex);
+ if (gdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- mutex_unlock(&card->discipline_mutex);
- }
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8d474179ce98..6970597bc885 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1970,11 +1970,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
- if (cgdev->state == CCWGROUP_ONLINE) {
- mutex_lock(&card->discipline_mutex);
+ if (cgdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- mutex_unlock(&card->discipline_mutex);
- }
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
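For context, the sysfs store path that this races against already serializes on
the same mutex. A rough sketch of that pattern (reconstructed from the commit
message, not the literal qeth_core_sys.c code; "newdis" stands for the
discipline parsed from the sysfs input):

  mutex_lock(&card->discipline_mutex);
  if (card->discipline) {
          card->discipline->remove(card->gdev);   /* tear down the old discipline */
          qeth_core_free_discipline(card);
  }
  rc = qeth_core_load_discipline(card, newdis);
  mutex_unlock(&card->discipline_mutex);

Before the patch, qeth_core_set_online() checked and loaded card->discipline
before taking discipline_mutex, so it could interleave with the store path
above. Taking the mutex around the whole check-and-load (as in the
qeth_core_main.c hunk) closes that window, and the new locking in
qeth_core_remove_device() covers the removal race the same way.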
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From b41b554c1ee75070a14c02a88496b1f231c7eacc Mon Sep 17 00:00:00 2001
From: Julian Wiedmann <jwi(a)linux.ibm.com>
Date: Thu, 7 Jan 2021 18:24:41 +0100
Subject: [PATCH] s390/qeth: fix locking for discipline setup / removal
The patch below does not apply to the 4.19-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From b41b554c1ee75070a14c02a88496b1f231c7eacc Mon Sep 17 00:00:00 2001
From: Julian Wiedmann <jwi(a)linux.ibm.com>
Date: Thu, 7 Jan 2021 18:24:41 +0100
Subject: [PATCH] s390/qeth: fix locking for discipline setup / removal
The patch below does not apply to the 5.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From b41b554c1ee75070a14c02a88496b1f231c7eacc Mon Sep 17 00:00:00 2001
From: Julian Wiedmann <jwi(a)linux.ibm.com>
Date: Thu, 7 Jan 2021 18:24:41 +0100
Subject: [PATCH] s390/qeth: fix locking for discipline setup / removal
From: Jarkko Sakkinen <jarkko(a)kernel.org>
BUG_ON() should not be used in kernel code unless there are exceptional
reasons to do so. Replace the BUG_ON() with a WARN() and return an error
pointer instead.
Cc: stable(a)vger.kernel.org
Fixes: b3811d36a3e7 ("KEYS: checking the input id parameters before finding asymmetric key")
Signed-off-by: Jarkko Sakkinen <jarkko(a)kernel.org>
---
crypto/asymmetric_keys/asymmetric_type.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index 33e77d846caa..47cc88fa0fa7 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -54,7 +54,10 @@ struct key *find_asymmetric_key(struct key *keyring,
char *req, *p;
int len;
- BUG_ON(!id_0 && !id_1);
+ if (!id_0 && !id_1) {
+ WARN(1, "All ID's are NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
if (id_0) {
lookup = id_0->data;
--
2.29.2
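The visible change is on the caller side: an all-NULL lookup now comes back as
an error pointer instead of halting the machine. A minimal caller sketch (the
keyring and ID variables are hypothetical, and the argument list is abbreviated
to the two IDs checked above plus the partial-match flag):

  struct key *key;

  key = find_asymmetric_key(keyring, id_0, id_1, false);
  if (IS_ERR(key))
          return PTR_ERR(key);    /* -EINVAL when both IDs were NULL, rather than a BUG() */

Callers already have to handle error pointers from this helper (e.g. when no
matching key is found), so no caller-side changes are required; only code that
relied on the old crash-on-misuse behaviour would notice.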
This is a note to let you know that I've just added the patch titled
iio: ad5504: Fix setting power-down state
to my staging git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
in the staging-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From efd597b2839a9895e8a98fcb0b76d2f545802cd4 Mon Sep 17 00:00:00 2001
From: Lars-Peter Clausen <lars(a)metafoo.de>
Date: Wed, 9 Dec 2020 11:46:49 +0100
Subject: iio: ad5504: Fix setting power-down state
The power-down mask of the ad5504 is actually a power-up mask: if a bit is
set, the corresponding channel is powered up, and if it is not set, the
channel is powered down.
The driver currently has this the wrong way around, resulting in the
channel being powered up when requested to be powered down and vice versa.
Fixes: 3bbbf150ffde ("staging:iio:dac:ad5504: Use strtobool for boolean values")
Signed-off-by: Lars-Peter Clausen <lars(a)metafoo.de>
Acked-by: Alexandru Ardelean <alexandru.ardelean(a)analog.com>
Link: https://lore.kernel.org/r/20201209104649.5794-1-lars@metafoo.de
Cc: <Stable(a)vger.kernel.org>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
---
drivers/iio/dac/ad5504.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 28921b62e642..e9297c25d4ef 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
return ret;
if (pwr_down)
- st->pwr_down_mask |= (1 << chan->channel);
- else
st->pwr_down_mask &= ~(1 << chan->channel);
+ else
+ st->pwr_down_mask |= (1 << chan->channel);
ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
--
2.30.0
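Put differently, after the fix the in-memory mask tracks which channels are
powered up. A small worked example of the assumed semantics on this
quad-channel part (illustrative only, not driver code):

  /* assumed AD5504 behaviour: a set bit means the channel is powered UP */
  st->pwr_down_mask = 0x0f;          /* all four channels powered up */
  st->pwr_down_mask &= ~(1 << 2);    /* power-down requested for channel 2 */
                                     /* mask is now 0x0b: channel 2 off, the rest untouched */

Before the fix the same request set the bit instead, which the hardware
interpreted as powering the channel up.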
This is a note to let you know that I've just added the patch titled
iio: common: st_sensors: fix possible infinite loop in st_sensors_irq_thread
to my staging git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
in the staging-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From 40c48fb79b9798954691f24b8ece1d3a7eb1b353 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo(a)kernel.org>
Date: Tue, 8 Dec 2020 15:36:40 +0100
Subject: iio: common: st_sensors: fix possible infinite loop in
st_sensors_irq_thread
Return a boolean value from the st_sensors_new_samples_available() routine
in order to avoid an infinite loop in st_sensors_irq_thread() if
stat_drdy.addr is not defined or the stat_drdy read fails.
Fixes: 90efe05562921 ("iio: st_sensors: harden interrupt handling")
Reported-by: Jonathan Cameron <jic23(a)kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo(a)kernel.org>
Reviewed-by: Linus Walleij <linus.walleij(a)linaro.org>
Link: https://lore.kernel.org/r/c9ec69ed349e7200c779fd7a5bf04c1aaa2817aa.16074381…
Cc: <Stable(a)vger.kernel.org>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
---
.../common/st_sensors/st_sensors_trigger.c | 31 ++++++++++---------
1 file changed, 17 insertions(+), 14 deletions(-)
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 0507283bd4c1..2dbd2646e44e 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -23,35 +23,31 @@
* @sdata: Sensor data.
*
* returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
*/
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
- struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+ struct st_sensor_data *sdata)
{
int ret, status;
/* How would I know if I can't check it? */
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
- return -EINVAL;
+ return true;
/* No scan mask, no interrupt */
if (!indio_dev->active_scan_mask)
- return 0;
+ return false;
ret = regmap_read(sdata->regmap,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
dev_err(sdata->dev, "error checking samples available\n");
- return ret;
+ return false;
}
- if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
- return 1;
-
- return 0;
+ return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
}
/**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
/* Tell the interrupt handler that we're dealing with edges */
if (irq_trig == IRQF_TRIGGER_FALLING ||
- irq_trig == IRQF_TRIGGER_RISING)
+ irq_trig == IRQF_TRIGGER_RISING) {
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+ dev_err(&indio_dev->dev,
+ "edge IRQ not supported w/o stat register.\n");
+ err = -EOPNOTSUPP;
+ goto iio_trigger_free;
+ }
sdata->edge_irq = true;
- else
+ } else {
/*
* If we're not using edges (i.e. level interrupts) we
* just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* interrupt handler top half again and start over.
*/
irq_trig |= IRQF_ONESHOT;
+ }
/*
* If the interrupt pin is Open Drain, by definition this
--
2.30.0
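The infinite loop sits in the consumer of this return value. A condensed
sketch of the edge-IRQ portion of st_sensors_irq_thread() (simplified, not the
literal code):

  /* with edge IRQs, keep polling for as long as samples are pending */
  while (sdata->hw_irq_trigger &&
         st_sensors_new_samples_available(indio_dev, sdata))
          iio_trigger_poll_chained(p);

With the old int return, both a missing stat_drdy register (-EINVAL) and a
failed regmap_read() (negative errno) were non-zero and therefore read as
"samples pending", so the loop never terminated. After this patch a failed
read returns false, and the missing-register case can no longer reach this
loop at all, because edge-triggered IRQs are now rejected at trigger
allocation with -EOPNOTSUPP.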
This is a note to let you know that I've just added the patch titled
counter:ti-eqep: remove floor
to my staging git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
in the staging-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From 49a9565a7a7ce168e3e6482fb24e62d12f72ab81 Mon Sep 17 00:00:00 2001
From: David Lechner <david(a)lechnology.com>
Date: Sun, 13 Dec 2020 18:09:27 -0600
Subject: counter:ti-eqep: remove floor
The hardware doesn't support a floor. QPOSINIT is an initialization value
that is loaded by other trigger events, not a lower bound for counting. When
the counter overflows, it always wraps around to zero.
Fixes: f213729f6796 ("counter: new TI eQEP driver")
Signed-off-by: David Lechner <david(a)lechnology.com>
Acked-by: William Breathitt Gray <vilhelm.gray(a)gmail.com>
Link: https://lore.kernel.org/r/20201214000927.1793062-1-david@lechnology.com
Cc: <Stable(a)vger.kernel.org>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
---
drivers/counter/ti-eqep.c | 35 -----------------------------------
1 file changed, 35 deletions(-)
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index a60aee1a1a29..65df9ef5b5bc 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
return len;
}
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, char *buf)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- u32 qposinit;
-
- regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
- return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
- struct counter_count *count,
- void *ext_priv, const char *buf,
- size_t len)
-{
- struct ti_eqep_cnt *priv = counter->priv;
- int err;
- u32 res;
-
- err = kstrtouint(buf, 0, &res);
- if (err < 0)
- return err;
-
- regmap_write(priv->regmap32, QPOSINIT, res);
-
- return len;
-}
-
static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, char *buf)
@@ -301,11 +271,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
.read = ti_eqep_position_ceiling_read,
.write = ti_eqep_position_ceiling_write,
},
- {
- .name = "floor",
- .read = ti_eqep_position_floor_read,
- .write = ti_eqep_position_floor_write,
- },
{
.name = "enable",
.read = ti_eqep_position_enable_read,
--
2.30.0
This is a note to let you know that I've just added the patch titled
drivers: iio: temperature: Add delay after the addressed reset command in mlx90632.c
to my staging git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
in the staging-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From cf5b1385d748b2f91b0c05bb301fcaf9bdbad385 Mon Sep 17 00:00:00 2001
From: Slaveyko Slaveykov <sis(a)melexis.com>
Date: Wed, 16 Dec 2020 13:57:20 +0200
Subject: drivers: iio: temperature: Add delay after the addressed reset
command in mlx90632.c
After an I2C reset command, the mlx90632 needs some time before
responding to other I2C commands. Without that delay, there is a chance
that the I2C command(s) after the reset will not be accepted.
Signed-off-by: Slaveyko Slaveykov <sis(a)melexis.com>
Reviewed-by: Andy Shevchenko <andy.shevchenko(a)gmail.com>
Reviewed-by: Crt Mori <cmo(a)melexis.com>
Fixes: e02472f74a81 ("iio:temperature:mlx90632: Adding extended calibration option")
Link: https://lore.kernel.org/r/20201216115720.12404-2-sis@melexis.com
Cc: <Stable(a)vger.kernel.org>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
---
drivers/iio/temperature/mlx90632.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 503fe54a0bb9..608ccb1d8bc8 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
if (ret < 0)
return ret;
+ /*
+ * Give the mlx90632 some time to reset properly before sending a new I2C command
+ * if this is not done, the following I2C command(s) will not be accepted.
+ */
+ usleep_range(150, 200);
+
ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
(MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
(MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
--
2.30.0
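For illustration, the resulting reset sequence looks roughly like this (the
register and command macro names below are placeholders, not the driver's
actual definitions):

  ret = regmap_write(regmap, MLX90632_REG_RESET, MLX90632_CMD_RESET);  /* addressed reset */
  if (ret < 0)
          return ret;

  /* settling time before the next I2C transaction, which the sensor
   * would otherwise risk NACKing */
  usleep_range(150, 200);

usleep_range() is the right primitive here: the code runs in sleepable
context, and a delay this short is well inside the range where udelay() would
busy-wait pointlessly and msleep() could oversleep by several milliseconds.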
This is a note to let you know that I've just added the patch titled
iio: adc: ti_am335x_adc: remove omitted iio_kfifo_free()
to my staging git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
in the staging-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From 7e6d9788aa02333a4353058816d52b9a90aae0d3 Mon Sep 17 00:00:00 2001
From: Alexandru Ardelean <alexandru.ardelean(a)analog.com>
Date: Thu, 3 Dec 2020 09:26:50 +0200
Subject: iio: adc: ti_am335x_adc: remove omitted iio_kfifo_free()
When the driver was converted to devm_iio_kfifo_allocate(), a leftover call
to iio_kfifo_free() in the error path should have been removed, but was
missed. This change removes it.
Fixes: 3c5308058899 ("iio: adc: ti_am335x_adc: alloc kfifo & IRQ via devm_ functions")
Signed-off-by: Alexandru Ardelean <alexandru.ardelean(a)analog.com>
Link: https://lore.kernel.org/r/20201203072650.24128-1-alexandru.ardelean@analog.…
Cc: <Stable(a)vger.kernel.org>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron(a)huawei.com>
---
drivers/iio/adc/ti_am335x_adc.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index b11c8c47ba2a..e946903b0993 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
flags, indio_dev->name, indio_dev);
if (ret)
- goto error_kfifo_free;
+ return ret;
indio_dev->setup_ops = setup_ops;
indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
-
-error_kfifo_free:
- iio_kfifo_free(indio_dev->buffer);
- return ret;
}
static const char * const chan_name_ain[] = {
--
2.30.0
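The call was dead weight because the devm_ variant ties the kfifo's lifetime
to the device, so neither the error paths nor the remove path have to free it
by hand. A minimal sketch of the pattern this driver's buffer setup follows
(condensed, with error handling trimmed):

  /* devm-managed allocation: released automatically on driver detach */
  buffer = devm_iio_kfifo_allocate(dev);
  if (!buffer)
          return -ENOMEM;

  iio_device_attach_buffer(indio_dev, buffer);

  ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
                                  flags, indio_dev->name, indio_dev);
  if (ret)
          return ret;     /* no iio_kfifo_free() needed on this path */

Because both the buffer and the IRQ are device-managed, every early return
simply unwinds through devres.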