If any allocation in the pkt_stream_*() helpers fails, exit_with_error() is called. This terminates the program immediately and can leak memory that was already allocated.
Return NULL from these helpers when an allocation fails, and return TEST_FAILURE from the test cases when packet generation goes wrong.
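The resulting convention, sketched here for illustration (the exact changes are in the diff below):

	/* Generation helpers report the failure instead of exiting: */
	pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
	if (!pkt_stream)
		return -ENOMEM;

	/* Test cases turn the error into a test result: */
	if (pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0))
		return TEST_FAILURE;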
Signed-off-by: Bastien Curutchet (eBPF Foundation) <bastien.curutchet@bootlin.com>
---
 tools/testing/selftests/bpf/xskxceiver.c | 97 +++++++++++++++++++++++---------
 1 file changed, 71 insertions(+), 26 deletions(-)
diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 41c744c627d9b8acffc687f6893a01557e6556c5..5b31c694fb0e104a81369b7ee2e82dc6ee190b4d 100644
--- a/tools/testing/selftests/bpf/xskxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -733,7 +733,7 @@ static struct pkt_stream *__pkt_stream_generate(u32 nb_pkts, u32 pkt_len, u32 nb
 	pkt_stream = __pkt_stream_alloc(nb_pkts);
 	if (!pkt_stream)
-		exit_with_error(ENOMEM);
+		return NULL;
 
 	pkt_stream->nb_pkts = nb_pkts;
 	pkt_stream->max_pkt_len = pkt_len;
@@ -757,36 +757,51 @@ static struct pkt_stream *pkt_stream_clone(struct pkt_stream *pkt_stream)
 	return pkt_stream_generate(pkt_stream->nb_pkts, pkt_stream->pkts[0].len);
 }
 
-static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
+static int pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len)
 {
 	struct pkt_stream *pkt_stream;
 
 	pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
+	if (!pkt_stream)
+		return -ENOMEM;
 	test->ifobj_tx->xsk->pkt_stream = pkt_stream;
 	pkt_stream = pkt_stream_generate(nb_pkts, pkt_len);
+	if (!pkt_stream)
+		return -ENOMEM;
 	test->ifobj_rx->xsk->pkt_stream = pkt_stream;
+
+	return 0;
 }
 
-static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
+static int __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len,
 				      int offset)
 {
 	struct pkt_stream *pkt_stream;
 	u32 i;
 
 	pkt_stream = pkt_stream_clone(ifobj->xsk->pkt_stream);
+	if (!pkt_stream)
+		return -ENOMEM;
+
 	for (i = 1; i < ifobj->xsk->pkt_stream->nb_pkts; i += 2)
 		pkt_stream_pkt_set(pkt_stream, &pkt_stream->pkts[i], offset, pkt_len);
 
 	ifobj->xsk->pkt_stream = pkt_stream;
+
+	return 0;
 }
 
-static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
+static int pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset)
 {
-	__pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
-	__pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
+	int ret = __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset);
+
+	if (ret)
+		return ret;
+
+	return __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset);
 }
 
-static void pkt_stream_receive_half(struct test_spec *test)
+static int pkt_stream_receive_half(struct test_spec *test)
 {
 	struct pkt_stream *pkt_stream = test->ifobj_tx->xsk->pkt_stream;
 	u32 i;
@@ -800,14 +815,19 @@ static void pkt_stream_receive_half(struct test_spec *test)
 	test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(pkt_stream->nb_pkts,
 							       pkt_stream->pkts[0].len);
+	if (!test->ifobj_rx->xsk->pkt_stream)
+		return -ENOMEM;
+
 	pkt_stream = test->ifobj_rx->xsk->pkt_stream;
 	for (i = 1; i < pkt_stream->nb_pkts; i += 2)
 		pkt_stream->pkts[i].valid = false;
 
 	pkt_stream->nb_valid_entries /= 2;
+
+	return 0;
 }
 
-static void pkt_stream_even_odd_sequence(struct test_spec *test)
+static int pkt_stream_even_odd_sequence(struct test_spec *test)
 {
 	struct pkt_stream *pkt_stream;
 	u32 i;
@@ -816,13 +836,19 @@ static void pkt_stream_even_odd_sequence(struct test_spec *test)
 		pkt_stream = test->ifobj_tx->xsk_arr[i].pkt_stream;
 		pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2, pkt_stream->pkts[0].len, i, 2);
+		if (!pkt_stream)
+			return -ENOMEM;
 		test->ifobj_tx->xsk_arr[i].pkt_stream = pkt_stream;
 
 		pkt_stream = test->ifobj_rx->xsk_arr[i].pkt_stream;
 		pkt_stream = __pkt_stream_generate(pkt_stream->nb_pkts / 2, pkt_stream->pkts[0].len, i, 2);
+		if (!pkt_stream)
+			return -ENOMEM;
 		test->ifobj_rx->xsk_arr[i].pkt_stream = pkt_stream;
 	}
+
+	return 0;
 }
 
 static void release_even_odd_sequence(struct test_spec *test)
@@ -881,7 +907,7 @@ static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, s
 	pkt_stream = __pkt_stream_alloc(nb_frames);
 	if (!pkt_stream)
-		exit_with_error(ENOMEM);
+		return NULL;
 
 	for (i = 0; i < nb_frames; i++) {
 		struct pkt *pkt = &pkt_stream->pkts[pkt_nb];
@@ -924,15 +950,21 @@ static struct pkt_stream *__pkt_stream_generate_custom(struct ifobject *ifobj, s
 	return pkt_stream;
 }
 
-static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
+static int pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts)
 {
 	struct pkt_stream *pkt_stream;
 
 	pkt_stream = __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts, true);
+	if (!pkt_stream)
+		return -ENOMEM;
 	test->ifobj_tx->xsk->pkt_stream = pkt_stream;
 
 	pkt_stream = __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts, false);
+	if (!pkt_stream)
+		return -ENOMEM;
 	test->ifobj_rx->xsk->pkt_stream = pkt_stream;
+
+	return 0;
 }
 
 static void pkt_print_data(u32 *data, u32 cnt)
@@ -2101,24 +2133,28 @@ static int testapp_stats_rx_dropped(struct test_spec *test)
 		return TEST_SKIP;
 	}
 
-	pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
+	if (pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0))
+		return TEST_FAILURE;
 	test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
 		XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
-	pkt_stream_receive_half(test);
+	if (pkt_stream_receive_half(test))
+		return TEST_FAILURE;
 	test->ifobj_rx->validation_func = validate_rx_dropped;
 	return testapp_validate_traffic(test);
 }
 
 static int testapp_stats_tx_invalid_descs(struct test_spec *test)
 {
-	pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
+	if (pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0))
+		return TEST_FAILURE;
 	test->ifobj_tx->validation_func = validate_tx_invalid_descs;
 	return testapp_validate_traffic(test);
 }
 
 static int testapp_stats_rx_full(struct test_spec *test)
 {
-	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
+	if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
+		return TEST_FAILURE;
 	pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
 	test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
@@ -2130,7 +2166,8 @@ static int testapp_stats_rx_full(struct test_spec *test)
 static int testapp_stats_fill_empty(struct test_spec *test)
 {
-	pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE);
+	if (pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE))
+		return TEST_FAILURE;
 	pkt_stream_delete(test->ifobj_rx->xsk->pkt_stream);
 	test->ifobj_rx->xsk->pkt_stream = pkt_stream_generate(DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE);
@@ -2144,7 +2181,8 @@ static int testapp_send_receive_unaligned(struct test_spec *test)
 	test->ifobj_tx->umem->unaligned_mode = true;
 	test->ifobj_rx->umem->unaligned_mode = true;
 	/* Let half of the packets straddle a 4K buffer boundary */
-	pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2);
+	if (pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2))
+		return TEST_FAILURE;
 
 	return testapp_validate_traffic(test);
 }
@@ -2154,7 +2192,8 @@ static int testapp_send_receive_unaligned_mb(struct test_spec *test)
 	test->mtu = MAX_ETH_JUMBO_SIZE;
 	test->ifobj_tx->umem->unaligned_mode = true;
 	test->ifobj_rx->umem->unaligned_mode = true;
-	pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
+	if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
+		return TEST_FAILURE;
 	return testapp_validate_traffic(test);
 }
@@ -2162,14 +2201,16 @@ static int testapp_single_pkt(struct test_spec *test)
 {
 	struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}};
 
-	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
+	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+		return TEST_FAILURE;
 	return testapp_validate_traffic(test);
 }
 
 static int testapp_send_receive_mb(struct test_spec *test)
 {
 	test->mtu = MAX_ETH_JUMBO_SIZE;
-	pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE);
+	if (pkt_stream_replace(test, DEFAULT_PKT_CNT, MAX_ETH_JUMBO_SIZE))
+		return TEST_FAILURE;
 
 	return testapp_validate_traffic(test);
 }
@@ -2210,7 +2251,8 @@ static int testapp_invalid_desc_mb(struct test_spec *test)
 	}
 
 	test->mtu = MAX_ETH_JUMBO_SIZE;
-	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
+	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+		return TEST_FAILURE;
 	return testapp_validate_traffic(test);
 }
@@ -2255,7 +2297,8 @@ static int testapp_invalid_desc(struct test_spec *test)
 		pkts[6].offset += umem_size;
 	}
 
-	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
+	if (pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)))
+		return TEST_FAILURE;
 	return testapp_validate_traffic(test);
 }
@@ -2267,7 +2310,8 @@ static int testapp_xdp_drop(struct test_spec *test)
 	test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_drop, skel_tx->progs.xsk_xdp_drop,
 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
 
-	pkt_stream_receive_half(test);
+	if (pkt_stream_receive_half(test))
+		return TEST_FAILURE;
 	return testapp_validate_traffic(test);
 }
@@ -2311,7 +2355,8 @@ static int testapp_xdp_shared_umem(struct test_spec *test)
 			       skel_tx->progs.xsk_xdp_shared_umem,
 			       skel_rx->maps.xsk, skel_tx->maps.xsk);
 
-	pkt_stream_even_odd_sequence(test);
+	if (pkt_stream_even_odd_sequence(test))
+		return TEST_FAILURE;
 
 	ret = testapp_validate_traffic(test);
@@ -2339,7 +2384,7 @@ static int testapp_too_many_frags(struct test_spec *test)
 {
 	struct pkt *pkts;
 	u32 max_frags, i;
-	int ret;
+	int ret = TEST_FAILURE;
 
 	if (test->mode == TEST_MODE_ZC) {
 		max_frags = test->ifobj_tx->xdp_zc_max_segs;
@@ -2384,8 +2429,8 @@ static int testapp_too_many_frags(struct test_spec *test)
 	pkts[2 * max_frags + 1].len = MIN_PKT_SIZE;
 	pkts[2 * max_frags + 1].valid = true;
 
-	pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2);
-	ret = testapp_validate_traffic(test);
+	if (!pkt_stream_generate_custom(test, pkts, 2 * max_frags + 2))
+		ret = testapp_validate_traffic(test);
 
 	free(pkts);
 	return ret;