On Thu, 04 Jan 2024 at 04:27:00 +0200, Eduard Zingerman wrote:
> On Wed, 2023-12-20 at 23:40 +0200, Maxim Mikityanskiy wrote:
> [...]
> The two tests below were added by the following commit: ef979017b837 ("bpf: selftest: Add verifier tests for <8-byte scalar spill and refill")
> As far as I understand, the original intent was to check the behavior of stack reads/writes with a non-matching size. I think these tests are redundant after patch #13. Wdyt?
_6_offset_to_skb_data is definitely not redundant: my patch 13 doesn't test a partial fill from the most significant bits.

u16_offset_to_skb_data is somewhat similar to fill_32bit_after_spill_64bit, but they aren't exactly the same: the former spills (u32)20 and fills (u16)20 (the same value), while my test spills (u64)0xXXXXXXXX00000000 and fills (u32)0 (the most significant bits are stripped). Maybe u16_offset_to_skb_data is redundant, but more coverage is better than less, isn't it?
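Just to illustrate the difference, here is a minimal sketch of the spill-u64/fill-u32 shape in the style of verifier_spill_fill.c (the function name and the constant are made up for the example; patch 13 has the real test):

SEC("tc")
__description("Sketch: fill u32 after spill u64 strips the upper half")
__success __retval(0)
__naked void sketch_fill_32bit_after_spill_64bit(void)
{
	asm volatile ("					\
	r4 = 0x1234567800000000 ll; /* illustrative value */\
	*(u64*)(r10 - 8) = r4;	/* spill all 64 bits */	\
	r4 = *(u32*)(r10 - 8);	/* on LE fills (u32)0: MSBs stripped */\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
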
diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
index 809a09732168..de03e72e07a9 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c
@@ -217,7 +217,7 @@ __naked void uninit_u32_from_the_stack(void)
 
 SEC("tc")
 __description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
-__failure __msg("invalid access to packet")
+__success __retval(0)
 __naked void u16_offset_to_skb_data(void)
 {
 	asm volatile ("					\
@@ -225,19 +225,24 @@ __naked void u16_offset_to_skb_data(void)
 	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
 	w4 = 20;					\
 	*(u32*)(r10 - 8) = r4;				\
-	r4 = *(u16*)(r10 - 8);				\
+	r4 = *(u16*)(r10 - %[offset]);			\
 	r0 = r2;					\
-	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
+	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */	\
 	r0 += r4;					\
-	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
+	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
 	if r0 > r3 goto l0_%=;				\
-	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
+	/* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
 	r0 = *(u32*)(r2 + 0);				\
 l0_%=:	r0 = 0;						\
 	exit;						\
 "	:
 	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
-	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	  __imm_const(offset, 8)
+#else
+	  __imm_const(offset, 6)
+#endif
 	: __clobber_all);
 }
 
@@ -270,7 +275,7 @@ l0_%=:	r0 = 0;						\
 }
 
 SEC("tc")
-__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
+__description("Spill a u32 const scalar. Refill as u16 from MSB. Offset to skb->data")
 __failure __msg("invalid access to packet")
 __naked void _6_offset_to_skb_data(void)
 {
@@ -279,7 +284,7 @@ __naked void _6_offset_to_skb_data(void)
 	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
 	w4 = 20;					\
 	*(u32*)(r10 - 8) = r4;				\
-	r4 = *(u16*)(r10 - 6);				\
+	r4 = *(u16*)(r10 - %[offset]);			\
 	r0 = r2;					\
 	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
 	r0 += r4;					\
@@ -291,7 +296,12 @@ l0_%=:	r0 = 0;						\
 	exit;						\
 "	:
 	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
-	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
+	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	  __imm_const(offset, 6)
+#else
+	  __imm_const(offset, 8)
+#endif
 	: __clobber_all);
 }
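
For reference, the offset logic: the u32 is spilled into bytes fp-8..fp-5, so the least significant half (the one holding 20) sits at fp-8 on little-endian and at fp-6 on big-endian, and the most significant half is at the opposite offset. The same byte layout in a standalone userspace sketch (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t spill = 20;	/* the (u32)20 that the test spills */
	unsigned char slot[4];	/* models stack bytes fp-8..fp-5 */
	uint16_t fill;

	memcpy(slot, &spill, sizeof(spill));

	memcpy(&fill, slot, sizeof(fill));	/* u16 read at fp-8 */
	printf("fp-8: %u\n", (unsigned)fill);	/* 20 on LE, 0 on BE */

	memcpy(&fill, slot + 2, sizeof(fill));	/* u16 read at fp-6 */
	printf("fp-6: %u\n", (unsigned)fill);	/* 0 on LE, 20 on BE */

	return 0;
}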