If the value of the link register is not correct (tail call from asm
that didn't set it, stack corruption, memory no longer mapped), then
using it for an address calculation may trigger an exception. Without a
fixup handler, this will lead to a panic, which will unwind, which will
trigger the fault repeatedly in an infinite loop.
We don't observe such failures currently, but we have in the past. Just
to be safe, add a fixup handler here so that at least we don't end up in
an infinite loop.
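For reference, the recovery relies on the kernel's exception table: the
__ex_table entry added below pairs the address of the faulting load with
the address of the fixup code, and the data abort handler redirects
execution there instead of oopsing. A simplified sketch of that dispatch,
loosely modelled on the ARM fixup_exception() logic (the function name
here is made up for illustration and is not part of this patch):

    #include <linux/extable.h>
    #include <linux/uaccess.h>
    #include <asm/ptrace.h>

    /* Simplified: on a kernel-mode fault, continue at the fixup (if any). */
    static int try_fixup(struct pt_regs *regs)
    {
            const struct exception_table_entry *fixup;

            /* Look the faulting PC up in the kernel/module __ex_table. */
            fixup = search_exception_tables(instruction_pointer(regs));
            if (fixup)
                    regs->ARM_pc = fixup->fixup;    /* e.g. resume at bad_lr */

            return fixup != NULL;
    }

Without an entry for the load at prev_call, the abort takes the normal
oops path instead, and the oops backtrace dereferences the same bad lr
again.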
Cc: stable@vger.kernel.org
Fixes: 6dc5fd93b2f1 ("ARM: 8900/1: UNWINDER_FRAME_POINTER implementation for Clang")
Reported-by: Miles Chen <miles.chen@mediatek.com>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
---
arch/arm/lib/backtrace-clang.S | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/arch/arm/lib/backtrace-clang.S b/arch/arm/lib/backtrace-clang.S
index 5388ac664c12..40eb2215eaf4 100644
--- a/arch/arm/lib/backtrace-clang.S
+++ b/arch/arm/lib/backtrace-clang.S
@@ -146,7 +146,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
tst sv_lr, #0 @ If there's no previous lr,
beq finished_setup @ we're done.
- ldr r0, [sv_lr, #-4] @ get call instruction
+prev_call: ldr r0, [sv_lr, #-4] @ get call instruction
ldr r3, .Lopcode+4
and r2, r3, r0 @ is this a bl call
teq r2, r3
@@ -206,6 +206,13 @@ finished_setup:
mov r2, frame
bl printk
no_frame: ldmfd sp!, {r4 - r9, fp, pc}
+/*
+ * Accessing the address pointed to by the link register triggered an
+ * exception, don't try to unwind through it.
+ */
+bad_lr: mov sv_fp, #0
+ mov sv_lr, #0
+ b finished_setup
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
.align 3
@@ -214,6 +221,7 @@ ENDPROC(c_backtrace)
.long 1003b, 1006b
.long 1004b, 1006b
.long 1005b, 1006b
+ .long prev_call, bad_lr
.popsection
.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
--
2.28.0.163.g6104cc2f0b6-goog
An earlier commit:
b7db41c9e03b ("io_uring: fix regression with always ignoring signals in io_cqring_wait()")
ensured that we didn't get stuck waiting for eventfd reads when it's
registered with the io_uring ring for event notification, but we still
have a gap where the task can be waiting on other events in the kernel
and need a bigger nudge to make forward progress.
Ensure that we use signaled notifications for a task that isn't currently
running, to be certain the work is seen and processed immediately.
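To illustrate the notification choice in terms of the public task_work
interface, here is a standalone sketch (the helper name is made up for
the example; only task_work_add(), TWA_RESUME/TWA_SIGNAL, READ_ONCE() and
wake_up_process() are real interfaces, and the patch below actually uses
split __task_work_add()/__task_work_notify() helpers instead):

    #include <linux/sched.h>
    #include <linux/task_work.h>

    /* Hypothetical helper mirroring the logic below: use TWA_SIGNAL when
     * the target task isn't currently running, so a task blocked in the
     * kernel still processes the queued work promptly. */
    static int queue_cb_on_task(struct task_struct *tsk,
                                struct callback_head *cb, bool sqpoll_thread)
    {
            int notify = TWA_RESUME;
            int ret;

            if (sqpoll_thread)
                    notify = 0;             /* SQPOLL thread just needs a wakeup */
            else if (READ_ONCE(tsk->state) != TASK_RUNNING)
                    notify = TWA_SIGNAL;    /* kick a task sleeping in the kernel */

            ret = task_work_add(tsk, cb, notify);
            if (!ret)
                    wake_up_process(tsk);
            return ret;
    }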
Cc: stable@vger.kernel.org # v5.7+
Reported-by: Josef <josef.grieb@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
fs/io_uring.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e9b27cdaa735..443eecdfeda9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1712,21 +1712,27 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
struct io_ring_ctx *ctx = req->ctx;
int ret, notify = TWA_RESUME;
+ ret = __task_work_add(tsk, cb);
+ if (unlikely(ret))
+ return ret;
+
/*
* SQPOLL kernel thread doesn't need notification, just a wakeup.
- * If we're not using an eventfd, then TWA_RESUME is always fine,
- * as we won't have dependencies between request completions for
- * other kernel wait conditions.
+ * For any other work, use signaled wakeups if the task isn't
+ * running to avoid dependencies between tasks or threads. If
+ * the issuing task is currently waiting in the kernel on a thread,
+ * and the same thread is waiting for a completion event, then we need
+ * to ensure that the issuing task processes task_work. TWA_SIGNAL
+ * is needed for that.
*/
if (ctx->flags & IORING_SETUP_SQPOLL)
notify = 0;
- else if (ctx->cq_ev_fd)
+ else if (READ_ONCE(tsk->state) != TASK_RUNNING)
notify = TWA_SIGNAL;
- ret = task_work_add(tsk, cb, notify);
- if (!ret)
- wake_up_process(tsk);
- return ret;
+ __task_work_notify(tsk, notify);
+ wake_up_process(tsk);
+ return 0;
}
static void __io_req_task_cancel(struct io_kiocb *req, int error)
--
2.28.0