1. Patch 1 and Patch 2 are dependent patches to resolve the BPF check error in 32-bit ARM. 2. Patch 3 supports bpf kfunc in 32-bit ARM. 3. Patch 4 is used to add test cases to cover some parameter scenarios stated by AAPCS.
The following is the test_progs result in the 32-bit ARM environment:
# uname -a Linux qemuarm32 6.1.0-rc3+ #2 SMP Thu Nov 3 15:31:29 CST 2022 armv7l armv7l armv7l GNU/Linux # echo 1 > /proc/sys/net/core/bpf_jit_enable # ./test_progs -t kfunc_call #1/1 kfunc_call/kfunc_syscall_test_fail:OK #1/2 kfunc_call/kfunc_syscall_test_null_fail:OK #1/3 kfunc_call/kfunc_call_test_get_mem_fail_rdonly:OK #1/4 kfunc_call/kfunc_call_test_get_mem_fail_use_after_free:OK #1/5 kfunc_call/kfunc_call_test_get_mem_fail_oob:OK #1/6 kfunc_call/kfunc_call_test_get_mem_fail_not_const:OK #1/7 kfunc_call/kfunc_call_test_mem_acquire_fail:OK #1/8 kfunc_call/kfunc_call_test1:OK #1/9 kfunc_call/kfunc_call_test2:OK #1/10 kfunc_call/kfunc_call_test4:OK #1/11 kfunc_call/kfunc_call_test_ref_btf_id:OK #1/12 kfunc_call/kfunc_call_test_get_mem:OK #1/13 kfunc_call/kfunc_syscall_test:OK #1/14 kfunc_call/kfunc_syscall_test_null:OK #1/17 kfunc_call/destructive:OK
Yang Jihong (4): bpf: Adapt 32-bit return value kfunc for 32-bit ARM when zext extension bpf: Remove size check for sk in bpf_skb_is_valid_access for 32-bit architecture bpf: Add kernel function call support in 32-bit ARM bpf:selftests: Add kfunc_call test for mixing 32-bit and 64-bit parameters
arch/arm/net/bpf_jit_32.c | 130 ++++++++++++++++++ kernel/bpf/verifier.c | 3 + net/bpf/test_run.c | 6 + net/core/filter.c | 2 - .../selftests/bpf/prog_tests/kfunc_call.c | 1 + .../selftests/bpf/progs/kfunc_call_test.c | 23 ++++ 6 files changed, 163 insertions(+), 2 deletions(-)
For the ARM32 architecture, if the data width of a kfunc return value is 32 bits, explicit zero extension of the high 32 bits is needed, so insn_def_regno should return dst_reg for the BPF_JMP type of BPF_PSEUDO_KFUNC_CALL. Otherwise, opt_subreg_zext_lo32_rnd_hi32 returns -EFAULT, resulting in BPF failure.
Signed-off-by: Yang Jihong yangjihong1@huawei.com --- kernel/bpf/verifier.c | 3 +++ 1 file changed, 3 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7f0a9f6cb889..bac37757ffca 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2404,6 +2404,9 @@ static int insn_def_regno(const struct bpf_insn *insn) { switch (BPF_CLASS(insn->code)) { case BPF_JMP: + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) + return insn->dst_reg; + fallthrough; case BPF_JMP32: case BPF_ST: return -1;
The error code -EACCES is returned when the bpf prog is tested in a 32-bit environment. This is because bpf_object__relocate modifies the instruction to change the memory size to 4 bytes, as shown in the following messages:
libbpf: prog 'kfunc_call_test1': relo #2: matching candidate #0 <byte_off> [18342] struct __sk_buff.sk (0:30:0 @ offset 168) libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) off 168 -> 168 libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) mem_sz 8 -> 4
As a result, the bpf_skb_is_valid_access check fails. For 32-bit architectures, the unnecessary checks need to be deleted.
Signed-off-by: Yang Jihong yangjihong1@huawei.com --- net/core/filter.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c index bb0136e7a8e4..eab7ce89740c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8269,8 +8269,6 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; break; case offsetof(struct __sk_buff, sk): - if (type == BPF_WRITE || size != sizeof(__u64)) - return false; info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; case offsetof(struct __sk_buff, tstamp_type):
On Thu, Nov 3, 2022 at 1:36 AM Yang Jihong yangjihong1@huawei.com wrote:
The error code -EACCES is returned when bpf prog is tested in 32-bit environment, This is because bpf_object__relocate modifies the instruction to change memory size to 4 bytes, as shown in the following messages:
libbpf: prog 'kfunc_call_test1': relo #2: matching candidate #0 <byte_off> [18342] struct __sk_buff.sk (0:30:0 @ offset 168) libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) off 168 -> 168 libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) mem_sz 8 -> 4
As a result, the bpf_skb_is_valid_access check fails. For 32-bit architecture, unnecessary checks need to be deleted.
Signed-off-by: Yang Jihong yangjihong1@huawei.com
net/core/filter.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c index bb0136e7a8e4..eab7ce89740c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8269,8 +8269,6 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; break; case offsetof(struct __sk_buff, sk):
if (type == BPF_WRITE || size != sizeof(__u64))
return false;
this probably should be specific to host architecture bitness? I'd imagine that size = 4 should be invalid on 64-bit arches (reading half of the pointer is bad)
either way, please make sure to add tests specifically for this case in test_verifier
info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; break; case offsetof(struct __sk_buff, tstamp_type):
-- 2.30.GIT
On Fri, Nov 4, 2022 at 2:56 PM Andrii Nakryiko andrii.nakryiko@gmail.com wrote:
On Thu, Nov 3, 2022 at 1:36 AM Yang Jihong yangjihong1@huawei.com wrote:
The error code -EACCES is returned when bpf prog is tested in 32-bit environment, This is because bpf_object__relocate modifies the instruction to change memory size to 4 bytes, as shown in the following messages:
libbpf: prog 'kfunc_call_test1': relo #2: matching candidate #0 <byte_off> [18342] struct __sk_buff.sk (0:30:0 @ offset 168) libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) off 168 -> 168 libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) mem_sz 8 -> 4
As a result, the bpf_skb_is_valid_access check fails. For 32-bit architecture, unnecessary checks need to be deleted.
Signed-off-by: Yang Jihong yangjihong1@huawei.com
net/core/filter.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c index bb0136e7a8e4..eab7ce89740c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8269,8 +8269,6 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; break; case offsetof(struct __sk_buff, sk):
if (type == BPF_WRITE || size != sizeof(__u64))
return false;
this probably should be specific to host architecture bitness? I'd imagine that size = 4 should be invalid on 64-bit arches (reading half of the pointer is bad)
Not quite. In __sk_buff the field 'sk' is defined as: __bpf_md_ptr(struct bpf_sock *, sk); so it's always 64-bit load when bpf prog reads it. In this case CO_RE shouldn't have been applied to uapi struct __sk_buff.
On Fri, Nov 4, 2022 at 4:32 PM Alexei Starovoitov alexei.starovoitov@gmail.com wrote:
On Fri, Nov 4, 2022 at 2:56 PM Andrii Nakryiko andrii.nakryiko@gmail.com wrote:
On Thu, Nov 3, 2022 at 1:36 AM Yang Jihong yangjihong1@huawei.com wrote:
The error code -EACCES is returned when bpf prog is tested in 32-bit environment, This is because bpf_object__relocate modifies the instruction to change memory size to 4 bytes, as shown in the following messages:
libbpf: prog 'kfunc_call_test1': relo #2: matching candidate #0 <byte_off> [18342] struct __sk_buff.sk (0:30:0 @ offset 168) libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) off 168 -> 168 libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) mem_sz 8 -> 4
As a result, the bpf_skb_is_valid_access check fails. For 32-bit architecture, unnecessary checks need to be deleted.
Signed-off-by: Yang Jihong yangjihong1@huawei.com
net/core/filter.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c index bb0136e7a8e4..eab7ce89740c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8269,8 +8269,6 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; break; case offsetof(struct __sk_buff, sk):
if (type == BPF_WRITE || size != sizeof(__u64))
return false;
this probably should be specific to host architecture bitness? I'd imagine that size = 4 should be invalid on 64-bit arches (reading half of the pointer is bad)
Not quite. In __sk_buff the field 'sk' is defined as: __bpf_md_ptr(struct bpf_sock *, sk); so it's always 64-bit load when bpf prog reads it. In this case CO_RE shouldn't have been applied to uapi struct __sk_buff.
Ok, hold on. __bpf_md_ptr just creates a 8-byte sized and aligned union. It doesn't change the pointer itself in any way:
union { struct bpf_sock* sk; __u64 :64; };
It's a 64-bit pointer only because any pointer in the BPF target is 64-bit. But on 32-bit architectures such struct bpf_sock *sk pointer will *actually* be 4-byte pointer (and __u64 :64 will just make compiler add 4 bytes of padding after it, effectively), and BPF verifier will actually generate LDX instruction of BPF_W size (4 byte load):
case offsetof(struct __sk_buff, sk): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); break;
BPF_FIELD_SIZEOF(struct sk_buff, sk) is 4 for 32-bit kernels.
So while you are correct that it will be 8-byte load from the BPF side, allowing 4-byte load for such pointers should also be correct. It's our choice, there is no fundamental limitation why this shouldn't be the case.
Note also that we do this transformation when fentry/fexit/raw_tp_btf programs traverse pointers in kernel structures. There pretending like pointer to an 8-byte value is actually invalid. So libbpf adjusts such loads to 4-byte loads for CO-RE-relocatable types, which makes it all work transparently on 32-bit architectures. Context accesses deviate from that, as they came earlier and we didn't have CO-RE at that time.
So what you are saying is that __sk_buff shouldn't be CO-RE-relocatable, and yes, that would be good. But I think that's orthogonal in this case.
On 11/7/22 5:07 PM, Andrii Nakryiko wrote:
On Fri, Nov 4, 2022 at 4:32 PM Alexei Starovoitov alexei.starovoitov@gmail.com wrote:
On Fri, Nov 4, 2022 at 2:56 PM Andrii Nakryiko andrii.nakryiko@gmail.com wrote:
On Thu, Nov 3, 2022 at 1:36 AM Yang Jihong yangjihong1@huawei.com wrote:
The error code -EACCES is returned when bpf prog is tested in 32-bit environment, This is because bpf_object__relocate modifies the instruction to change memory size to 4 bytes, as shown in the following messages:
libbpf: prog 'kfunc_call_test1': relo #2: matching candidate #0 <byte_off> [18342] struct __sk_buff.sk (0:30:0 @ offset 168) libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) off 168 -> 168 libbpf: prog 'kfunc_call_test1': relo #2: patched insn #1 (LDX/ST/STX) mem_sz 8 -> 4
As a result, the bpf_skb_is_valid_access check fails. For 32-bit architecture, unnecessary checks need to be deleted.
Signed-off-by: Yang Jihong yangjihong1@huawei.com
net/core/filter.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c index bb0136e7a8e4..eab7ce89740c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8269,8 +8269,6 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type return false; break; case offsetof(struct __sk_buff, sk):
if (type == BPF_WRITE || size != sizeof(__u64))
return false;
this probably should be specific to host architecture bitness? I'd imagine that size = 4 should be invalid on 64-bit arches (reading half of the pointer is bad)
Not quite. In __sk_buff the field 'sk' is defined as: __bpf_md_ptr(struct bpf_sock *, sk); so it's always 64-bit load when bpf prog reads it. In this case CO_RE shouldn't have been applied to uapi struct __sk_buff.
Ok, hold on. __bpf_md_ptr just creates a 8-byte sized and aligned union. It doesn't change the pointer itself in any way:
union { struct bpf_sock* sk; __u64 :64; };
It's a 64-bit pointer only because any pointer in the BPF target is 64-bit. But on 32-bit architectures such struct bpf_sock *sk pointer will *actually* be 4-byte pointer (and __u64 :64 will just make compiler add 4 bytes of padding after it, effectively), and BPF verifier will actually generate LDX instruction of BPF_W size (4 byte load):
case offsetof(struct __sk_buff, sk): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); break;
BPF_FIELD_SIZEOF(struct sk_buff, sk) is 4 for 32-bit kernels.
So while you are correct that it will be 8-byte load from the BPF side, allowing 4-byte load for such pointers should also be correct. It's our choice, there is no fundamental limitation why this shouldn't be the case.
Note also that we do this transformation when fentry/fexit/raw_tp_btf programs traverse pointers in kernel structures. There pretending like pointer to an 8-byte value is actually invalid. So libbpf adjusts such loads to 4-byte loads for CO-RE-relocatable types, which makes it all work transparently on 32-bit architectures. Context accesses deviate from that, as they came earlier and we didn't have CO-RE at that time.
So what you are saying is that __sk_buff shouldn't be CO-RE-relocatable, and yes, that would be good. But I think that's orthogonal in this case.
This issue should be from commit c1ff181ffabc ("selftests/bpf: Extend kfunc selftests") which replaced the uapi's bpf.h with vmlinux.h. One option to unblock this for now is to separate those tests that read __sk_buff->sk to its own prog.c and use the uapi's bpf.h instead of vmlinux.h.
It would be nice if the bpf-tc program can take 'struct sk_buff *skb' instead of 'struct __sk_buff *skb' but it will be a separate topic.
This patch adds kernel function call support to the 32-bit ARM bpf jit.
Signed-off-by: Yang Jihong yangjihong1@huawei.com --- arch/arm/net/bpf_jit_32.c | 130 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 6a1c9fca5260..51428c82bec6 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1337,6 +1337,118 @@ static void build_epilogue(struct jit_ctx *ctx) #endif }
+/* + * Input parameters of function in 32-bit ARM architecture: + * The first four word-sized parameters passed to a function will be + * transferred in registers R0-R3. Sub-word sized arguments, for example, + * char, will still use a whole register. + * Arguments larger than a word will be passed in multiple registers. + * If more arguments are passed, the fifth and subsequent words will be passed + * on the stack. + * + * The first four args of a function will be considered for + * putting into the 32bit register R0, R1, R2 and R3. + * + * Two 32bit registers are used to pass a 64bit arg. + * + * For example, + * void foo(u32 a, u32 b, u32 c, u32 d, u32 e): + * u32 a: R0 + * u32 b: R1 + * u32 c: R2 + * u32 d: R3 + * u32 e: stack + * + * void foo(u64 a, u32 b, u32 c, u32 d): + * u64 a: R0 (lo32) R1 (hi32) + * u32 b: R2 + * u32 c: R3 + * u32 d: stack + * + * void foo(u32 a, u64 b, u32 c, u32 d): + * u32 a: R0 + * u64 b: R2 (lo32) R3 (hi32) + * u32 c: stack + * u32 d: stack + * + * void foo(u32 a, u32 b, u64 c, u32 d): + * u32 a: R0 + * u32 b: R1 + * u64 c: R2 (lo32) R3 (hi32) + * u32 d: stack + * + * The return value will be stored in the R0 (and R1 for 64bit value). + * + * For example, + * u32 foo(u32 a, u32 b, u32 c): + * return value: R0 + * + * u64 foo(u32 a, u32 b, u32 c): + * return value: R0 (lo32) R1 (hi32) + */ +static int emit_kfunc_call(const struct bpf_insn *insn, struct jit_ctx *ctx, const u32 func) +{ + int i; + const struct btf_func_model *fm; + const s8 *tmp = bpf2a32[TMP_REG_1]; + const u8 arg_regs[] = { ARM_R0, ARM_R1, ARM_R2, ARM_R3 }; + int nr_arg_regs = ARRAY_SIZE(arg_regs); + int arg_regs_idx = 0, stack_off = 0; + + fm = bpf_jit_find_kfunc_model(ctx->prog, insn); + if (!fm) + return -EINVAL; + + for (i = 0; i < fm->nr_args; i++) { + if (fm->arg_size[i] > sizeof(u32)) { + if (arg_regs_idx + 1 < nr_arg_regs) { + /* + * AAPCS states: + * A double-word sized type is passed in two + * consecutive registers (e.g., r0 and r1, or + * r2 and r3).
The content of the registers is + * as if the value had been loaded from memory + * representation with a single LDM instruction. + */ + if (arg_regs_idx & 1) + arg_regs_idx++; + + emit(ARM_LDRD_I(arg_regs[arg_regs_idx], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP( + bpf2a32[BPF_REG_1 + i][1])), ctx); + + arg_regs_idx += 2; + } else { + stack_off = ALIGN(stack_off, STACK_ALIGNMENT); + + emit(ARM_LDRD_I(tmp[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP( + bpf2a32[BPF_REG_1 + i][1])), ctx); + emit(ARM_STRD_I(tmp[1], ARM_SP, stack_off), ctx); + + stack_off += 8; + } + } else { + if (arg_regs_idx + 1 < nr_arg_regs) { + emit_a32_mov_r(arg_regs[arg_regs_idx++], + bpf2a32[BPF_REG_1 + i][1], ctx); + } else { + emit(ARM_LDR_I(tmp[1], ARM_FP, + EBPF_SCRATCH_TO_ARM_FP( + bpf2a32[BPF_REG_1 + i][1])), ctx); + emit(ARM_STR_I(tmp[1], ARM_SP, stack_off), ctx); + + stack_off += 4; + } + } + } + + emit_a32_mov_i(tmp[1], func, ctx); + emit_blx_r(tmp[1], ctx); + + return 0; +} + /* * Convert an eBPF instruction to native instruction, i.e * JITs an eBPF instruction. @@ -1603,6 +1715,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); break; @@ -1785,6 +1901,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) const s8 *r5 = bpf2a32[BPF_REG_5]; const u32 func = (u32)__bpf_call_base + (u32)imm;
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { + int err; + + err = emit_kfunc_call(insn, ctx, func); + + if (err) + return err; + break; + } + emit_a32_mov_r64(true, r0, r1, ctx); emit_a32_mov_r64(true, r1, r2, ctx); emit_push_r64(r5, ctx); @@ -2022,3 +2148,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) return prog; }
+bool bpf_jit_supports_kfunc_call(void) +{ + return true; +}
for function foo(u32 a, u64 b, u32 c) in 32-bit ARM: a is in r0, b is in r2-r3, c is stored on the stack. Because the AAPCS states: "A double-word sized type is passed in two consecutive registers (e.g., r0 and r1, or r2 and r3). The content of the registers is as if the value had been loaded from memory representation with a single LDM instruction." Add test cases to cover this parameter-passing scenario.
Signed-off-by: Yang Jihong yangjihong1@huawei.com --- net/bpf/test_run.c | 6 +++++ .../selftests/bpf/prog_tests/kfunc_call.c | 1 + .../selftests/bpf/progs/kfunc_call_test.c | 23 +++++++++++++++++++ 3 files changed, 30 insertions(+)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 13d578ce2a09..bdfb3081e1ce 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -551,6 +551,11 @@ struct sock * noinline bpf_kfunc_call_test3(struct sock *sk) return sk; }
+u64 noinline bpf_kfunc_call_test4(struct sock *sk, u64 a, u64 b, u32 c, u32 d) +{ + return a + b + c + d; +} + struct prog_test_member1 { int a; }; @@ -739,6 +744,7 @@ BTF_SET8_START(test_sk_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_kfunc_call_test1) BTF_ID_FLAGS(func, bpf_kfunc_call_test2) BTF_ID_FLAGS(func, bpf_kfunc_call_test3) +BTF_ID_FLAGS(func, bpf_kfunc_call_test4) BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE) diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 5af1ee8f0e6e..13a105bb05ed 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -72,6 +72,7 @@ static struct kfunc_test_params kfunc_tests[] = { /* success cases */ TC_TEST(kfunc_call_test1, 12), TC_TEST(kfunc_call_test2, 3), + TC_TEST(kfunc_call_test4, 16), TC_TEST(kfunc_call_test_ref_btf_id, 0), TC_TEST(kfunc_call_test_get_mem, 42), SYSCALL_TEST(kfunc_syscall_test, 0), diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index f636e50be259..7cccb014d26e 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -6,6 +6,8 @@ extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym; extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, __u32 c, __u64 d) __ksym; +extern __u64 bpf_kfunc_call_test4(struct sock *sk, __u64 a, __u64 b, + __u32 c, __u32 d) __ksym;
extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; @@ -17,6 +19,27 @@ extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym; extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym;
+SEC("tc") +int kfunc_call_test4(struct __sk_buff *skb) +{ + struct bpf_sock *sk = skb->sk; + __u64 a = 1ULL << 32; + __u32 ret; + + if (!sk) + return -1; + + sk = bpf_sk_fullsock(sk); + if (!sk) + return -1; + + a = bpf_kfunc_call_test4((struct sock *)sk, a | 2, a | 3, 4, 5); + ret = a >> 32; /* ret should be 2 */ + ret += (__u32)a; /* ret should be 16 */ + + return ret; +} + SEC("tc") int kfunc_call_test2(struct __sk_buff *skb) {
linux-kselftest-mirror@lists.linaro.org