The flow_dissector selftest runs many tests over different kinds of packets, grouped into three categories: skb mode, non-skb mode with direct attach, and non-skb mode with indirect attach.
Split the main test function into dedicated tests, one per category. Each test now has to do its own setup/teardown, but in exchange the tests can be run and filtered individually. While at it, make sure that the tests attaching the bpf program run in a dedicated network namespace.
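To summarize the new structure, here is a minimal sketch of the pattern each dedicated test now follows (the test name and netns name below are only placeholders, and the includes assume the usual selftests helpers: test_progs.h for the ASSERT_* macros, network_helpers.h for netns_new()/netns_free(), plus the generated bpf_flow.skel.h skeleton):

    #include "test_progs.h"
    #include "network_helpers.h"
    #include "bpf_flow.skel.h"

    /* Placeholder test: create a dedicated netns, load the skeleton,
     * run the checks, then tear everything down in reverse order.
     */
    void test_flow_dissector_example(void)
    {
            struct bpf_flow *skel;
            struct netns_obj *ns;

            ns = netns_new("flow_dissector_example_ns", true);
            if (!ASSERT_OK_PTR(ns, "create and open netns"))
                    return;

            skel = bpf_flow__open_and_load();
            if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
                    goto out_clean_ns;

            /* ... attach the dissector program and run the actual checks ... */

            bpf_flow__destroy(skel);
    out_clean_ns:
            netns_free(ns);
    }

Each real test below follows this shape, just with more setup steps (prog array init, tap device creation) and the matching error labels.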
Signed-off-by: Alexis Lothoré (eBPF Foundation) <alexis.lothore@bootlin.com>
---
Changes in v2:
- fix some error path sequences (e.g. properly closing the tap interface)
- isolate tests in a dedicated ns
- do not mark the tests as "serial" tests
---
 .../selftests/bpf/prog_tests/flow_dissector.c | 108 ++++++++++++++-------
 1 file changed, 73 insertions(+), 35 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index 6fbe8b6dad561aec02db552caea02517ac1e2109..7e7051a85be7410d4c636af8cd58206a76afe49e 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -549,63 +549,117 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
 	}
 }
-static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
+void test_flow_dissector_skb_less_direct_attach(void)
 {
-	int err, prog_fd;
+	int err, prog_fd, tap_fd;
+	struct bpf_flow *skel;
+	struct netns_obj *ns;
+
+	ns = netns_new("flow_dissector_skb_less_direct_attach_ns", true);
+	if (!ASSERT_OK_PTR(ns, "create and open netns"))
+		return;
+
+	skel = bpf_flow__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+		goto out_clean_ns;
+
+	err = init_prog_array(skel->obj, skel->maps.jmp_table);
+	if (!ASSERT_OK(err, "init_prog_array"))
+		goto out_destroy_skel;
 	prog_fd = bpf_program__fd(skel->progs._dissect);
 	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
-		return;
+		goto out_destroy_skel;
 	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
 	if (!ASSERT_OK(err, "bpf_prog_attach"))
-		return;
+		goto out_destroy_skel;
+
+	tap_fd = create_tap("tap0");
+	if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+		goto out_destroy_skel;
+	err = ifup("tap0");
+	if (!ASSERT_OK(err, "ifup"))
+		goto out_close_tap;
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
 	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
 	ASSERT_OK(err, "bpf_prog_detach2");
+
+out_close_tap:
+	close(tap_fd);
+out_destroy_skel:
+	bpf_flow__destroy(skel);
+out_clean_ns:
+	netns_free(ns);
 }
-static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
+void test_flow_dissector_skb_less_indirect_attach(void)
 {
+	int err, net_fd, tap_fd;
+	struct bpf_flow *skel;
 	struct bpf_link *link;
-	int err, net_fd;
+	struct netns_obj *ns;
+
+	ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
+	if (!ASSERT_OK_PTR(ns, "create and open netns"))
+		return;
+
+	skel = bpf_flow__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
+		goto out_clean_ns;
 	net_fd = open("/proc/self/ns/net", O_RDONLY);
 	if (!ASSERT_OK_FD(net_fd, "open(/proc/self/ns/net"))
-		return;
+		goto out_destroy_skel;
+
+	err = init_prog_array(skel->obj, skel->maps.jmp_table);
+	if (!ASSERT_OK(err, "init_prog_array"))
+		goto out_close_ns;
+
+	tap_fd = create_tap("tap0");
+	if (!ASSERT_OK_FD(tap_fd, "create_tap"))
+		goto out_close_ns;
+	err = ifup("tap0");
+	if (!ASSERT_OK(err, "ifup"))
+		goto out_close_tap;
 	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
 	if (!ASSERT_OK_PTR(link, "attach_netns"))
-		goto out_close;
+		goto out_close_tap;
run_tests_skb_less(tap_fd, skel->maps.last_dissection);
 	err = bpf_link__destroy(link);
 	ASSERT_OK(err, "bpf_link__destroy");
-out_close:
+
+out_close_tap:
+	close(tap_fd);
+out_close_ns:
 	close(net_fd);
+out_destroy_skel:
+	bpf_flow__destroy(skel);
+out_clean_ns:
+	netns_free(ns);
 }
-void test_flow_dissector(void)
+void test_flow_dissector_skb(void)
 {
-	int i, err, prog_fd, keys_fd = -1, tap_fd;
 	struct bpf_flow *skel;
+	int i, err, prog_fd;
 	skel = bpf_flow__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
 		return;
-	prog_fd = bpf_program__fd(skel->progs._dissect);
-	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
-		return;
-	keys_fd = bpf_map__fd(skel->maps.last_dissection);
-	if (!ASSERT_OK_FD(keys_fd, "bpf_map__fd"))
-		return;
 	err = init_prog_array(skel->obj, skel->maps.jmp_table);
 	if (!ASSERT_OK(err, "init_prog_array"))
-		return;
+		goto out_destroy_skel;
+
+	prog_fd = bpf_program__fd(skel->progs._dissect);
+	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
+		goto out_destroy_skel;
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_flow_keys flow_keys;
@@ -635,24 +689,8 @@ void test_flow_dissector(void)
 			     sizeof(struct bpf_flow_keys), "returned flow keys");
 	}
-	/* Do the same tests but for skb-less flow dissector.
-	 * We use a known path in the net/tun driver that calls
-	 * eth_get_headlen and we manually export bpf_flow_keys
-	 * via BPF map in this case.
-	 */
-	tap_fd = create_tap("tap0");
-	if (!ASSERT_OK_FD(tap_fd, "create_tap"))
-		goto out_destroy_skel;
-	err = ifup("tap0");
-	if (!ASSERT_OK(err, "ifup"))
-		goto out_destroy_skel;
-
-	/* Test direct prog attachment */
-	test_skb_less_prog_attach(skel, tap_fd);
-	/* Test indirect prog attachment via link */
-	test_skb_less_link_create(skel, tap_fd);
-	close(tap_fd);
 out_destroy_skel:
 	bpf_flow__destroy(skel);
 }
+