6.18-stable review patch. If anyone has any objections, please let me know.
------------------
From: Chenghao Duan <duanchenghao@kylinos.cn>
commit 26138762d9a27a7f1c33f467c4123c600f64a36e upstream.
Remove the previous restriction that blocked tracing of kernel module functions, and fix the issue that caused kernel lockups when attempting to trace them.
Before entering the trampoline code, the return address register ra holds the address of the instruction that follows the 'bl trampoline' instruction (i.e. an address inside the traced function), while register t0 holds the parent function's return address. Refine the trampoline return logic so that the register contents remain correct both when returning to the traced function and when returning to the parent function.
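For reference, a sketch (not part of the patch) of the patched call site described above, assuming the usual LoongArch ftrace two-instruction patch sequence:

	traced_func:
		move	t0, ra		# t0 <- return address in the parent
		bl	trampoline	# ra <- address of the next instruction,
					#       i.e. inside the traced function
		...			# remainder of the traced function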
Before this patch was applied, the module_attach test in selftests/bpf hit a deadlock: the jump address taken after the trampoline executed was incorrect, which resulted in an infinite loop within the module function.
Cc: stable@vger.kernel.org
Fixes: 677e6123e3d2 ("LoongArch: BPF: Disable trampoline for kernel module function trace")
Signed-off-by: Chenghao Duan <duanchenghao@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/loongarch/net/bpf_jit.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1284,7 +1284,7 @@ static int emit_jump_or_nops(void *targe
 		return 0;
 	}
 
-	return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO, (u64)target);
+	return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_RA : LOONGARCH_GPR_ZERO, (u64)target);
 }
 
 static int emit_call(struct jit_ctx *ctx, u64 addr)
@@ -1638,14 +1638,12 @@ static int __arch_prepare_bpf_trampoline
 
 	/* To traced function */
 	/* Ftrace jump skips 2 NOP instructions */
-	if (is_kernel_text((unsigned long)orig_call))
+	if (is_kernel_text((unsigned long)orig_call) ||
+	    is_module_text_address((unsigned long)orig_call))
 		orig_call += LOONGARCH_FENTRY_NBYTES;
 	/* Direct jump skips 5 NOP instructions */
 	else if (is_bpf_text_address((unsigned long)orig_call))
 		orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
-	/* Module tracing not supported - cause kernel lockups */
-	else if (is_module_text_address((unsigned long)orig_call))
-		return -ENOTSUPP;
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
@@ -1738,12 +1736,16 @@ static int __arch_prepare_bpf_trampoline
 		emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, 0);
 		emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, 16);
 
-		if (flags & BPF_TRAMP_F_SKIP_FRAME)
+		if (flags & BPF_TRAMP_F_SKIP_FRAME) {
 			/* return to parent function */
-			emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_RA, 0);
-		else
-			/* return to traced function */
+			move_reg(ctx, LOONGARCH_GPR_RA, LOONGARCH_GPR_T0);
 			emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T0, 0);
+		} else {
+			/* return to traced function */
+			move_reg(ctx, LOONGARCH_GPR_T1, LOONGARCH_GPR_RA);
+			move_reg(ctx, LOONGARCH_GPR_RA, LOONGARCH_GPR_T0);
+			emit_insn(ctx, jirl, LOONGARCH_GPR_ZERO, LOONGARCH_GPR_T1, 0);
+		}
 	}
 
 	ret = ctx->idx;
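For readers, the instruction sequences the fixed epilogue now emits boil down to the following (reconstructed from the diff above, not additional patch content):

	# BPF_TRAMP_F_SKIP_FRAME set: skip the traced function
	move	ra, t0			# ra <- parent return address
	jirl	zero, t0, 0		# jump to the parent

	# otherwise: return into the traced function
	move	t1, ra			# t1 <- traced function address
	move	ra, t0			# ra <- parent return address
	jirl	zero, t1, 0		# jump into the traced function; it
					# later returns to the parent via ra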