Extend bpf_skb_adjust_room growth to mark inner MAC header so that L2 encapsulation can be used for tc tunnels.
Patch #1 extends the existing test_tc_tunnel to support UDP encapsulation; later we want to be able to test MPLS over UDP and MPLS over GRE encapsulation.
Patch #2 adds the BPF_F_ADJ_ROOM_ENCAP_L2(len) macro, which allows specification of inner mac length. Other approaches were explored prior to taking this approach. Specifically, I tried automatically computing the inner mac length on the basis of the specified flags (so inner maclen for GRE/IPv4 encap is the len_diff specified to bpf_skb_adjust_room minus GRE + IPv4 header length for example). Problem with this is that we don't know for sure what form of GRE/UDP header we have; is it a full GRE header, or is it a FOU UDP header or generic UDP encap header? My fear here was we'd end up with an explosion of flags. The other approach tried was to support inner L2 header marking as a separate room adjustment, i.e. adjust for L3/L4 encap, then call bpf_skb_adjust_room for L2 encap. This can be made to work but because it imposed an order on operations, felt a bit clunky.
Patch #3 syncs tools/ bpf.h.
Patch #4 extends the tests again to support MPLSoverGRE and MPLSoverUDP encap, along with existing test coverage.
Alan Maguire (4): selftests_bpf: extend test_tc_tunnel for UDP encap bpf: add layer 2 encap support to bpf_skb_adjust_room bpf: sync bpf.h to tools/ for BPF_F_ADJ_ROOM_ENCAP_L2 selftests_bpf: extend test_tc_tunnel.sh test for L2 encap
include/uapi/linux/bpf.h | 5 + net/core/filter.c | 19 +- tools/include/uapi/linux/bpf.h | 5 + tools/testing/selftests/bpf/progs/test_tc_tunnel.c | 281 ++++++++++++++++----- tools/testing/selftests/bpf/test_tc_tunnel.sh | 105 +++++--- 5 files changed, 318 insertions(+), 97 deletions(-)
In
commit 868d523535c2 ("bpf: add bpf_skb_adjust_room encap flags")
...Willem introduced support to bpf_skb_adjust_room for GSO-friendly GRE and UDP encapsulation and later introduced associated test_tc_tunnel tests. Here those tests are extended to cover UDP encapsulation also.
Signed-off-by: Alan Maguire alan.maguire@oracle.com --- tools/testing/selftests/bpf/progs/test_tc_tunnel.c | 149 ++++++++++++++------- tools/testing/selftests/bpf/test_tc_tunnel.sh | 72 +++++++--- 2 files changed, 157 insertions(+), 64 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c index f541c2d..cc88379 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c +++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c @@ -12,6 +12,7 @@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> +#include <linux/udp.h> #include <linux/pkt_cls.h> #include <linux/types.h>
@@ -20,16 +21,27 @@
static const int cfg_port = 8000;
-struct grev4hdr { - struct iphdr ip; +static const int cfg_udp_src = 20000; +static const int cfg_udp_dst = 5555; + +struct gre_hdr { __be16 flags; __be16 protocol; } __attribute__((packed));
-struct grev6hdr { +union l4hdr { + struct udphdr udp; + struct gre_hdr gre; +}; + +struct v4hdr { + struct iphdr ip; + union l4hdr l4hdr; +} __attribute__((packed)); + +struct v6hdr { struct ipv6hdr ip; - __be16 flags; - __be16 protocol; + union l4hdr l4hdr; } __attribute__((packed));
static __always_inline void set_ipv4_csum(struct iphdr *iph) @@ -47,10 +59,11 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph) iph->check = ~((csum & 0xffff) + (csum >> 16)); }
-static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre) +static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) { - struct grev4hdr h_outer; struct iphdr iph_inner; + struct v4hdr h_outer; + struct udphdr *udph; struct tcphdr tcph; __u64 flags; int olen; @@ -70,12 +83,29 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre) if (tcph.dest != __bpf_constant_htons(cfg_port)) return TC_ACT_OK;
- flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4; - if (with_gre) { - flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE; - olen = sizeof(h_outer); - } else { - olen = sizeof(h_outer.ip); + olen = sizeof(h_outer.ip); + + flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4; + switch (encap_proto) { + case IPPROTO_GRE: + flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE | BPF_F_ADJ_ROOM_FIXED_GSO; + olen += sizeof(h_outer.l4hdr.gre); + h_outer.l4hdr.gre.protocol = bpf_htons(ETH_P_IP); + h_outer.l4hdr.gre.flags = 0; + break; + case IPPROTO_UDP: + flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP; + olen += sizeof(h_outer.l4hdr.udp); + h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src); + h_outer.l4hdr.udp.dest = __bpf_constant_htons(cfg_udp_dst); + h_outer.l4hdr.udp.check = 0; + h_outer.l4hdr.udp.len = bpf_htons(bpf_ntohs(iph_inner.tot_len) + + sizeof(h_outer.l4hdr.udp)); + break; + case IPPROTO_IPIP: + break; + default: + return TC_ACT_OK; }
/* add room between mac and network header */ @@ -85,16 +115,10 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre) /* prepare new outer network header */ h_outer.ip = iph_inner; h_outer.ip.tot_len = bpf_htons(olen + - bpf_htons(h_outer.ip.tot_len)); - if (with_gre) { - h_outer.ip.protocol = IPPROTO_GRE; - h_outer.protocol = bpf_htons(ETH_P_IP); - h_outer.flags = 0; - } else { - h_outer.ip.protocol = IPPROTO_IPIP; - } + bpf_htons(h_outer.ip.tot_len)); + h_outer.ip.protocol = encap_proto;
- set_ipv4_csum((void *)&h_outer.ip); + set_ipv4_csum(&h_outer.ip);
/* store new outer network header */ if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen, @@ -104,11 +128,12 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre) return TC_ACT_OK; }
-static __always_inline int encap_ipv6(struct __sk_buff *skb, bool with_gre) +static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto) { struct ipv6hdr iph_inner; - struct grev6hdr h_outer; + struct v6hdr h_outer; struct tcphdr tcph; + __u16 tot_len; __u64 flags; int olen;
@@ -124,14 +149,31 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, bool with_gre) if (tcph.dest != __bpf_constant_htons(cfg_port)) return TC_ACT_OK;
- flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6; - if (with_gre) { - flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE; - olen = sizeof(h_outer); - } else { - olen = sizeof(h_outer.ip); - } + olen = sizeof(h_outer.ip);
+ flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV6; + switch (encap_proto) { + case IPPROTO_GRE: + flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE | BPF_F_ADJ_ROOM_FIXED_GSO; + olen += sizeof(h_outer.l4hdr.gre); + h_outer.l4hdr.gre.protocol = bpf_htons(ETH_P_IPV6); + h_outer.l4hdr.gre.flags = 0; + break; + case IPPROTO_UDP: + flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP; + olen += sizeof(h_outer.l4hdr.udp); + h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src); + h_outer.l4hdr.udp.dest = __bpf_constant_htons(cfg_udp_dst); + h_outer.l4hdr.udp.check = 0; + tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner); + h_outer.l4hdr.udp.len = bpf_htons(tot_len + + sizeof(h_outer.l4hdr.udp)); + break; + case IPPROTO_IPV6: + break; + default: + return TC_ACT_OK; + }
/* add room between mac and network header */ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags)) @@ -141,13 +183,8 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, bool with_gre) h_outer.ip = iph_inner; h_outer.ip.payload_len = bpf_htons(olen + bpf_ntohs(h_outer.ip.payload_len)); - if (with_gre) { - h_outer.ip.nexthdr = IPPROTO_GRE; - h_outer.protocol = bpf_htons(ETH_P_IPV6); - h_outer.flags = 0; - } else { - h_outer.ip.nexthdr = IPPROTO_IPV6; - } + + h_outer.ip.nexthdr = encap_proto;
/* store new outer network header */ if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen, @@ -161,7 +198,7 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, bool with_gre) int __encap_ipip(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) - return encap_ipv4(skb, false); + return encap_ipv4(skb, IPPROTO_IPIP); else return TC_ACT_OK; } @@ -170,7 +207,16 @@ int __encap_ipip(struct __sk_buff *skb) int __encap_gre(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) - return encap_ipv4(skb, true); + return encap_ipv4(skb, IPPROTO_GRE); + else + return TC_ACT_OK; +} + +SEC("encap_udp") +int __encap_udp(struct __sk_buff *skb) +{ + if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) + return encap_ipv4(skb, IPPROTO_UDP); else return TC_ACT_OK; } @@ -179,7 +225,7 @@ int __encap_gre(struct __sk_buff *skb) int __encap_ip6tnl(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) - return encap_ipv6(skb, false); + return encap_ipv6(skb, IPPROTO_IPV6); else return TC_ACT_OK; } @@ -188,23 +234,34 @@ int __encap_ip6tnl(struct __sk_buff *skb) int __encap_ip6gre(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) - return encap_ipv6(skb, true); + return encap_ipv6(skb, IPPROTO_GRE); + else + return TC_ACT_OK; +} + +SEC("encap_ip6udp") +int __encap_ip6udp(struct __sk_buff *skb) +{ + if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) + return encap_ipv6(skb, IPPROTO_UDP); else return TC_ACT_OK; }
static int decap_internal(struct __sk_buff *skb, int off, int len, char proto) { - char buf[sizeof(struct grev6hdr)]; - int olen; + char buf[sizeof(struct v6hdr)]; + int olen = len;
switch (proto) { case IPPROTO_IPIP: case IPPROTO_IPV6: - olen = len; break; case IPPROTO_GRE: - olen = len + 4 /* gre hdr */; + olen += sizeof(struct gre_hdr); + break; + case IPPROTO_UDP: + olen += sizeof(struct udphdr); break; default: return TC_ACT_OK; diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh index c805adb..3ae54f0 100755 --- a/tools/testing/selftests/bpf/test_tc_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh @@ -15,6 +15,9 @@ readonly ns2_v4=192.168.1.2 readonly ns1_v6=fd::1 readonly ns2_v6=fd::2
+# Must match port used by bpf program +readonly udpport=5555 + readonly infile="$(mktemp)" readonly outfile="$(mktemp)"
@@ -103,6 +106,18 @@ if [[ "$#" -eq "0" ]]; then echo "ip6 gre gso" $0 ipv6 ip6gre 2000
+ echo "ip udp" + $0 ipv4 udp 100 + + echo "ip6 udp" + $0 ipv6 ip6udp 100 + + echo "ip udp gso" + $0 ipv4 udp 2000 + + echo "ip6 udp gso" + $0 ipv6 ip6udp 2000 + echo "OK. All tests passed" exit 0 fi @@ -117,12 +132,14 @@ case "$1" in "ipv4") readonly addr1="${ns1_v4}" readonly addr2="${ns2_v4}" - readonly netcat_opt=-4 + readonly ipproto=4 + readonly netcat_opt=-${ipproto} ;; "ipv6") readonly addr1="${ns1_v6}" readonly addr2="${ns2_v6}" - readonly netcat_opt=-6 + readonly ipproto=6 + readonly netcat_opt=-${ipproto} ;; *) echo "unknown arg: $1" @@ -158,27 +175,46 @@ server_listen # serverside, insert decap module # server is still running # client can connect again -ip netns exec "${ns2}" ip link add dev testtun0 type "${tuntype}" \ - remote "${addr1}" local "${addr2}" -# Because packets are decapped by the tunnel they arrive on testtun0 from -# the IP stack perspective. Ensure reverse path filtering is disabled -# otherwise we drop the TCP SYN as arriving on testtun0 instead of the -# expected veth2 (veth2 is where 192.168.1.2 is configured). -ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.rp_filter=0 -# rp needs to be disabled for both all and testtun0 as the rp value is -# selected as the max of the "all" and device-specific values. -ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.testtun0.rp_filter=0 -ip netns exec "${ns2}" ip link set dev testtun0 up -echo "test bpf encap with tunnel device decap" -client_connect -verify_data + +# Skip tunnel tests for ip6udp. For IPv6, a UDP checksum is required +# and there seems to be no way to tell a fou6 tunnel to allow 0 +# checksums. Accordingly for both these cases, we skip tests against +# tunnel peer, and test encap using BPF decap only. +if [[ "$tuntype" != "ip6udp" ]]; then + if [[ "$tuntype" == "udp" ]]; then + # Set up fou tunnel. + ttype=ipip + targs="encap fou encap-sport auto encap-dport $udpport" + # fou may be a module; allow this to fail. 
+ modprobe fou ||true + ip netns exec "${ns2}" ip fou add port 5555 ipproto "${ipproto}" + else + ttype=$tuntype + targs="" + fi + ip netns exec "${ns2}" ip link add name testtun0 type "${ttype}" \ + remote "${addr1}" local "${addr2}" $targs + # Because packets are decapped by the tunnel they arrive on testtun0 + # from the IP stack perspective. Ensure reverse path filtering is + # disabled otherwise we drop the TCP SYN as arriving on testtun0 + # instead of the expected veth2 (veth2 is where 192.168.1.2 is + # configured). + ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.rp_filter=0 + # rp needs to be disabled for both all and testtun0 as the rp value is + # selected as the max of the "all" and device-specific values. + ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.testtun0.rp_filter=0 + ip netns exec "${ns2}" ip link set dev testtun0 up + echo "test bpf encap with tunnel device decap" + client_connect + verify_data + ip netns exec "${ns2}" ip link del dev testtun0 + server_listen +fi
# serverside, use BPF for decap -ip netns exec "${ns2}" ip link del dev testtun0 ip netns exec "${ns2}" tc qdisc add dev veth2 clsact ip netns exec "${ns2}" tc filter add dev veth2 ingress \ bpf direct-action object-file ./test_tc_tunnel.o section decap -server_listen echo "test bpf encap with bpf decap" client_connect verify_data
In
commit 868d523535c2 ("bpf: add bpf_skb_adjust_room encap flags")
...Willem introduced support to bpf_skb_adjust_room for GSO-friendly
nit: please avoid unnecessary vertical whitespace. Explicit mention of author is also not very relevant. I suggest "Commit XXX ("..") introduced support [..]". Here and in other patches.
GRE and UDP encapsulation and later introduced associated test_tc_tunnel tests. Here those tests are extended to cover UDP encapsulation also.
Signed-off-by: Alan Maguire alan.maguire@oracle.com
-static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre) +static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) {
struct grev4hdr h_outer; struct iphdr iph_inner;
struct v4hdr h_outer;
struct udphdr *udph; struct tcphdr tcph; __u64 flags; int olen;
@@ -70,12 +83,29 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, bool with_gre) if (tcph.dest != __bpf_constant_htons(cfg_port)) return TC_ACT_OK;
flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4;
if (with_gre) {
flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
olen = sizeof(h_outer);
} else {
olen = sizeof(h_outer.ip);
olen = sizeof(h_outer.ip);
flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4;
Please keep BPF_F_ADJ_ROOM_FIXED_GSO enabled on all variants. Here and in IPv6.
switch (encap_proto) {
case IPPROTO_GRE:
flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE | BPF_F_ADJ_ROOM_FIXED_GSO;
olen += sizeof(h_outer.l4hdr.gre);
h_outer.l4hdr.gre.protocol = bpf_htons(ETH_P_IP);
h_outer.l4hdr.gre.flags = 0;
break;
case IPPROTO_UDP:
flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
olen += sizeof(h_outer.l4hdr.udp);
h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
h_outer.l4hdr.udp.dest = __bpf_constant_htons(cfg_udp_dst);
h_outer.l4hdr.udp.check = 0;
h_outer.l4hdr.udp.len = bpf_htons(bpf_ntohs(iph_inner.tot_len) +
sizeof(h_outer.l4hdr.udp));
break;
case IPPROTO_IPIP:
break;
default:
return TC_ACT_OK;
@@ -158,27 +175,46 @@ server_listen # serverside, insert decap module # server is still running # client can connect again -ip netns exec "${ns2}" ip link add dev testtun0 type "${tuntype}" \
remote "${addr1}" local "${addr2}"
-# Because packets are decapped by the tunnel they arrive on testtun0 from -# the IP stack perspective. Ensure reverse path filtering is disabled -# otherwise we drop the TCP SYN as arriving on testtun0 instead of the -# expected veth2 (veth2 is where 192.168.1.2 is configured). -ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.rp_filter=0 -# rp needs to be disabled for both all and testtun0 as the rp value is -# selected as the max of the "all" and device-specific values. -ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.testtun0.rp_filter=0 -ip netns exec "${ns2}" ip link set dev testtun0 up -echo "test bpf encap with tunnel device decap" -client_connect -verify_data
+# Skip tunnel tests for ip6udp. For IPv6, a UDP checksum is required +# and there seems to be no way to tell a fou6 tunnel to allow 0 +# checksums. Accordingly for both these cases, we skip tests against +# tunnel peer, and test encap using BPF decap only.
Checksum should not have to be verified over veth, when packets never leave the host, of course. Indeed, it is not for unencapsulated or inner packets. If the checksum has to be verified for ipv6/udp tunnels, it would be interesting to understand why and whether that can be fixed. Not a prerequisite for this patchset, to be clear.
I assume that this is the udp_lib_checksum_complete(skb) inside the udpv6_encap_needed_key static branch in udpv6_queue_rcv_one_skb. Shouldn't skb->ip_summed be CHECKSUM_PARTIAL here? I wonder if csum_start is now incorrectly set to the outer (tunnel) header, while it should continue to point to the inner tcp header.
RFC 6935 and 6936 suggest extensions to IPv6 UDP to allow zero checksum in the narrow case of (some) tunnels. If this use case matches, I guess it is fine to support the mode even if fou6 decap does not. But if not, it would be better to make the test more realistic. For instance by setting up checksumming correctly. Perhaps with BPF_FUNC_l4_csum_replace or more interestingly by relying on the properties of local checksum offload to only have to compute a checksum over the headers (which we can do inline in the program, as length is fixed).
+if [[ "$tuntype" != "ip6udp" ]]; then
Irrespective of the details above, can we avoid the code churn from indentation below? Just run the test as is, but only change the expectation of error code in client_connect on udp and skip verify_data on client_connect failure.
if [[ "$tuntype" == "udp" ]]; then
# Set up fou tunnel.
ttype=ipip
targs="encap fou encap-sport auto encap-dport $udpport"
# fou may be a module; allow this to fail.
modprobe fou ||true
ip netns exec "${ns2}" ip fou add port 5555 ipproto "${ipproto}"
else
ttype=$tuntype
targs=""
fi
ip netns exec "${ns2}" ip link add name testtun0 type "${ttype}" \
remote "${addr1}" local "${addr2}" $targs
# Because packets are decapped by the tunnel they arrive on testtun0
# from the IP stack perspective. Ensure reverse path filtering is
# disabled otherwise we drop the TCP SYN as arriving on testtun0
# instead of the expected veth2 (veth2 is where 192.168.1.2 is
# configured).
ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.all.rp_filter=0
# rp needs to be disabled for both all and testtun0 as the rp value is
# selected as the max of the "all" and device-specific values.
ip netns exec "${ns2}" sysctl -qw net.ipv4.conf.testtun0.rp_filter=0
ip netns exec "${ns2}" ip link set dev testtun0 up
echo "test bpf encap with tunnel device decap"
client_connect
verify_data
ip netns exec "${ns2}" ip link del dev testtun0
server_listen
+fi
# serverside, use BPF for decap -ip netns exec "${ns2}" ip link del dev testtun0 ip netns exec "${ns2}" tc qdisc add dev veth2 clsact ip netns exec "${ns2}" tc filter add dev veth2 ingress \ bpf direct-action object-file ./test_tc_tunnel.o section decap -server_listen echo "test bpf encap with bpf decap" client_connect verify_data -- 1.8.3.1
In
commit 868d523535c2 ("bpf: add bpf_skb_adjust_room encap flags")
...Willem introduced support to bpf_skb_adjust_room for GSO-friendly GRE and UDP encapsulation.
For GSO to work for skbs, the inner headers (mac and network) need to be marked. For L3 encapsulation using bpf_skb_adjust_room, the mac and network headers are identical. Here we provide a way of specifying the inner mac header length for cases where L2 encap is desired. Such an approach can support encapsulated ethernet headers, MPLS headers etc. For example to convert from a packet of form [eth][ip][tcp] to [eth][ip][udp][inner mac][ip][tcp], something like the following could be done:
headroom = sizeof(iph) + sizeof(struct udphdr) + inner_maclen;
ret = bpf_skb_adjust_room(skb, headroom, BPF_ADJ_ROOM_MAC, BPF_F_ADJ_ROOM_ENCAP_L4_UDP | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | BPF_F_ADJ_ROOM_ENCAP_L2(inner_maclen));
Signed-off-by: Alan Maguire alan.maguire@oracle.com --- include/uapi/linux/bpf.h | 5 +++++ net/core/filter.c | 19 ++++++++++++++----- 2 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 8370245..6d8346a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1500,6 +1500,10 @@ struct bpf_stack_build_id { * * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **: * Use with ENCAP_L3 flags to further specify the tunnel type. * + * * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **: + * Use with ENCAP_L3/L4 flags to further specify the tunnel + * type; **len** is the length of the inner MAC header. + * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be @@ -2645,6 +2649,7 @@ enum bpf_func_id { #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2) #define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3) #define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4) +#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & 0xff) << 56)
/* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { diff --git a/net/core/filter.c b/net/core/filter.c index 22eb2ed..02ae8c0 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2969,14 +2969,16 @@ static u32 bpf_skb_net_base_len(const struct sk_buff *skb) #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ - BPF_F_ADJ_ROOM_ENCAP_L4_UDP) + BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ + BPF_F_ADJ_ROOM_ENCAP_L2(0xff))
static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, u64 flags) { + u16 mac_len = 0, inner_mac = 0, inner_net = 0, inner_trans = 0; bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK; - u16 mac_len = 0, inner_net = 0, inner_trans = 0; unsigned int gso_type = SKB_GSO_DODGY; + u8 inner_mac_len = flags >> 56; int ret;
if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) { @@ -3003,11 +3005,19 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) return -EINVAL;
+ if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP && + flags & BPF_F_ADJ_ROOM_FIXED_GSO && + inner_mac_len > 0) + return -EINVAL; + if (skb->encapsulation) return -EALREADY;
mac_len = skb->network_header - skb->mac_header; inner_net = skb->network_header; + if (inner_mac_len > len_diff) + return -EINVAL; + inner_mac = inner_net - inner_mac_len; inner_trans = skb->transport_header; }
@@ -3016,8 +3026,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, return ret;
if (encap) { - /* inner mac == inner_net on l3 encap */ - skb->inner_mac_header = inner_net; + skb->inner_mac_header = inner_mac; skb->inner_network_header = inner_net; skb->inner_transport_header = inner_trans; skb_set_inner_protocol(skb, skb->protocol); @@ -3031,7 +3040,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, gso_type |= SKB_GSO_GRE; else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) gso_type |= SKB_GSO_IPXIP6; - else + else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4) gso_type |= SKB_GSO_IPXIP4;
if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
On Mon, Apr 1, 2019 at 11:33 AM Alan Maguire alan.maguire@oracle.com wrote:
In
commit 868d523535c2 ("bpf: add bpf_skb_adjust_room encap flags")
...Willem introduced support to bpf_skb_adjust_room for GSO-friendly GRE and UDP encapsulation.
For GSO to work for skbs, the inner headers (mac and network) need to be marked. For L3 encapsulation using bpf_skb_adjust_room, the mac and network headers are identical. Here we provide a way of specifying the inner mac header length for cases where L2 encap is desired. Such an approach can support encapsulated ethernet headers, MPLS headers etc. For example to convert from a packet of form [eth][ip][tcp] to [eth][ip][udp][inner mac][ip][tcp], something like the following could be done:
headroom = sizeof(iph) + sizeof(struct udphdr) + inner_maclen; ret = bpf_skb_adjust_room(skb, headroom, BPF_ADJ_ROOM_MAC, BPF_F_ADJ_ROOM_ENCAP_L4_UDP | BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | BPF_F_ADJ_ROOM_ENCAP_L2(inner_maclen));
Signed-off-by: Alan Maguire alan.maguire@oracle.com
+#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & 0xff) << 56)
Here ..
/* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { diff --git a/net/core/filter.c b/net/core/filter.c index 22eb2ed..02ae8c0 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2969,14 +2969,16 @@ static u32 bpf_skb_net_base_len(const struct sk_buff *skb) #define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
BPF_F_ADJ_ROOM_ENCAP_L2(0xff))
.. and here ..
static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, u64 flags) {
u16 mac_len = 0, inner_mac = 0, inner_net = 0, inner_trans = 0; bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
u16 mac_len = 0, inner_net = 0, inner_trans = 0; unsigned int gso_type = SKB_GSO_DODGY;
u8 inner_mac_len = flags >> 56;
.. and here: please do not use hardcoded constants. Define explicit constant integers or macros for readability and consistency.
int ret; if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
@@ -3003,11 +3005,19 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff, flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) return -EINVAL;
if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP &&
flags & BPF_F_ADJ_ROOM_FIXED_GSO &&
inner_mac_len > 0)
return -EINVAL;
Why is UDP encap with inner MAC (or MPLS) not allowed with fixed GSO?
Sync include/uapi/linux/bpf.h with tools/ equivalent to add BPF_F_ADJ_ROOM_ENCAP_L2(len) macro.
Signed-off-by: Alan Maguire alan.maguire@oracle.com --- tools/include/uapi/linux/bpf.h | 5 +++++ 1 file changed, 5 insertions(+)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 8370245..6d8346a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1500,6 +1500,10 @@ struct bpf_stack_build_id { * * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP **: * Use with ENCAP_L3 flags to further specify the tunnel type. * + * * **BPF_F_ADJ_ROOM_ENCAP_L2(len) **: + * Use with ENCAP_L3/L4 flags to further specify the tunnel + * type; **len** is the length of the inner MAC header. + * * A call to this helper is susceptible to change the underlaying * packet buffer. Therefore, at load time, all checks on pointers * previously done by the verifier are invalidated and must be @@ -2645,6 +2649,7 @@ enum bpf_func_id { #define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2) #define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3) #define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4) +#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & 0xff) << 56)
/* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode {
Update test_tc_tunnel to verify adding inner L2 header encapsulation (an MPLS label) works.
Signed-off-by: Alan Maguire alan.maguire@oracle.com --- tools/testing/selftests/bpf/progs/test_tc_tunnel.c | 172 +++++++++++++++++---- tools/testing/selftests/bpf/test_tc_tunnel.sh | 59 +++---- 2 files changed, 170 insertions(+), 61 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c index cc88379..5127b1b 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c +++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c @@ -11,6 +11,7 @@ #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/mpls.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/pkt_cls.h> @@ -23,7 +24,13 @@
static const int cfg_udp_src = 20000; static const int cfg_udp_dst = 5555; +/* MPLSoverUDP */ +#define MPLS_OVER_UDP_PORT 6635 +static const int cfg_mplsudp_dst = MPLS_OVER_UDP_PORT;
+/* MPLS label 1000 with S bit (last label) set and ttl of 255. */ +static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 | + MPLS_LS_S_MASK | 0xff); struct gre_hdr { __be16 flags; __be16 protocol; @@ -37,6 +44,7 @@ struct gre_hdr { struct v4hdr { struct iphdr ip; union l4hdr l4hdr; + __u8 pad[16]; /* enough space for eth header after udp hdr */ } __attribute__((packed));
struct v6hdr { @@ -59,14 +67,17 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph) iph->check = ~((csum & 0xffff) + (csum >> 16)); }
-static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) +static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto, + __u16 l2_proto) { struct iphdr iph_inner; struct v4hdr h_outer; struct udphdr *udph; struct tcphdr tcph; + struct ethhdr eth; + int olen, elen; __u64 flags; - int olen; + __u16 dst;
if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner, sizeof(iph_inner)) < 0) @@ -84,23 +95,39 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) return TC_ACT_OK;
olen = sizeof(h_outer.ip); + elen = 0;
flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4; + + if (l2_proto == ETH_P_MPLS_UC) { + elen = sizeof(mpls_label); + flags |= BPF_F_ADJ_ROOM_ENCAP_L2(elen); + } + switch (encap_proto) { case IPPROTO_GRE: flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE | BPF_F_ADJ_ROOM_FIXED_GSO; olen += sizeof(h_outer.l4hdr.gre); - h_outer.l4hdr.gre.protocol = bpf_htons(ETH_P_IP); + h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto); h_outer.l4hdr.gre.flags = 0; break; case IPPROTO_UDP: flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP; olen += sizeof(h_outer.l4hdr.udp); - h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src); - h_outer.l4hdr.udp.dest = __bpf_constant_htons(cfg_udp_dst); h_outer.l4hdr.udp.check = 0; h_outer.l4hdr.udp.len = bpf_htons(bpf_ntohs(iph_inner.tot_len) + - sizeof(h_outer.l4hdr.udp)); + sizeof(h_outer.l4hdr.udp) + + elen); + h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src); + switch (l2_proto) { + case ETH_P_IP: + dst = cfg_udp_dst; + break; + case ETH_P_MPLS_UC: + dst = cfg_mplsudp_dst; + break; + } + h_outer.l4hdr.udp.dest = bpf_htons(dst); break; case IPPROTO_IPIP: break; @@ -108,6 +135,13 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) return TC_ACT_OK; }
+ /* add L2 encap (if specified) */ + if (l2_proto == ETH_P_MPLS_UC) + __builtin_memcpy((__u8 *)&h_outer + olen, &mpls_label, + sizeof(mpls_label)); + + olen += elen; + /* add room between mac and network header */ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags)) return TC_ACT_SHOT; @@ -124,18 +158,19 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen, BPF_F_INVALIDATE_HASH) < 0) return TC_ACT_SHOT; - return TC_ACT_OK; }
-static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto) +static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto, + __u16 l2_proto) { struct ipv6hdr iph_inner; struct v6hdr h_outer; struct tcphdr tcph; + int olen, elen; __u16 tot_len; __u64 flags; - int olen; + __u16 dst;
if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner, sizeof(iph_inner)) < 0) @@ -150,24 +185,39 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto) return TC_ACT_OK;
olen = sizeof(h_outer.ip); + elen = 0;
flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV6; + + if (l2_proto == ETH_P_MPLS_UC) { + elen = sizeof(mpls_label); + flags |= BPF_F_ADJ_ROOM_ENCAP_L2(elen); + } + switch (encap_proto) { case IPPROTO_GRE: flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE | BPF_F_ADJ_ROOM_FIXED_GSO; olen += sizeof(h_outer.l4hdr.gre); - h_outer.l4hdr.gre.protocol = bpf_htons(ETH_P_IPV6); + h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto); h_outer.l4hdr.gre.flags = 0; break; case IPPROTO_UDP: flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP; olen += sizeof(h_outer.l4hdr.udp); - h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src); - h_outer.l4hdr.udp.dest = __bpf_constant_htons(cfg_udp_dst); h_outer.l4hdr.udp.check = 0; tot_len = bpf_ntohs(iph_inner.payload_len) + sizeof(iph_inner); h_outer.l4hdr.udp.len = bpf_htons(tot_len + - sizeof(h_outer.l4hdr.udp)); + sizeof(h_outer.l4hdr.udp) + elen); + h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src); + switch (l2_proto) { + case ETH_P_IPV6: + dst = cfg_udp_dst; + break; + case ETH_P_MPLS_UC: + dst = cfg_mplsudp_dst; + break; + } + h_outer.l4hdr.udp.dest = bpf_htons(dst); break; case IPPROTO_IPV6: break; @@ -175,6 +225,13 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto) return TC_ACT_OK; }
+ /* add L2 encap (if specified) */ + if (l2_proto == ETH_P_MPLS_UC) + __builtin_memcpy((__u8 *)&h_outer + olen, &mpls_label, + sizeof(mpls_label)); + + olen += elen; + /* add room between mac and network header */ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags)) return TC_ACT_SHOT; @@ -194,63 +251,104 @@ static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto) return TC_ACT_OK; }
-SEC("encap_ipip") +SEC("encap_ipip_none") int __encap_ipip(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) - return encap_ipv4(skb, IPPROTO_IPIP); + return encap_ipv4(skb, IPPROTO_IPIP, ETH_P_IP); + else + return TC_ACT_OK; +} + +SEC("encap_gre_none") +int __encap_gre_none(struct __sk_buff *skb) +{ + if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) + return encap_ipv4(skb, IPPROTO_GRE, ETH_P_IP); else return TC_ACT_OK; }
-SEC("encap_gre") -int __encap_gre(struct __sk_buff *skb) +SEC("encap_gre_mpls") +int __encap_gre_mpls(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) - return encap_ipv4(skb, IPPROTO_GRE); + return encap_ipv4(skb, IPPROTO_GRE, ETH_P_MPLS_UC); else return TC_ACT_OK; }
-SEC("encap_udp") + +SEC("encap_udp_none") int __encap_udp(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) - return encap_ipv4(skb, IPPROTO_UDP); + return encap_ipv4(skb, IPPROTO_UDP, ETH_P_IP); + else + return TC_ACT_OK; +} + +SEC("encap_udp_mpls") +int __encap_udp_mpls(struct __sk_buff *skb) +{ + if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) + return encap_ipv4(skb, IPPROTO_UDP, ETH_P_MPLS_UC); + else + return TC_ACT_OK; +} + + +SEC("encap_ip6tnl_none") +int __encap_ip6tnl_none(struct __sk_buff *skb) +{ + if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) + return encap_ipv6(skb, IPPROTO_IPV6, ETH_P_IPV6); + else + return TC_ACT_OK; +} + +SEC("encap_ip6gre_none") +int __encap_ip6gre_none(struct __sk_buff *skb) +{ + if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) + return encap_ipv6(skb, IPPROTO_GRE, ETH_P_IPV6); else return TC_ACT_OK; }
-SEC("encap_ip6tnl") -int __encap_ip6tnl(struct __sk_buff *skb) +SEC("encap_ip6gre_mpls") +int __encap_ip6gre_mpls(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) - return encap_ipv6(skb, IPPROTO_IPV6); + return encap_ipv6(skb, IPPROTO_GRE, ETH_P_MPLS_UC); else return TC_ACT_OK; }
-SEC("encap_ip6gre") -int __encap_ip6gre(struct __sk_buff *skb) +SEC("encap_ip6udp_none") +int __encap_ip6udp_none(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) - return encap_ipv6(skb, IPPROTO_GRE); + return encap_ipv6(skb, IPPROTO_UDP, ETH_P_IPV6); else return TC_ACT_OK; }
-SEC("encap_ip6udp") -int __encap_ip6udp(struct __sk_buff *skb) +SEC("encap_ip6udp_mpls") +int __encap_ip6udp_mpls(struct __sk_buff *skb) { if (skb->protocol == __bpf_constant_htons(ETH_P_IPV6)) - return encap_ipv6(skb, IPPROTO_UDP); + return encap_ipv6(skb, IPPROTO_UDP, ETH_P_MPLS_UC); else return TC_ACT_OK; }
-static int decap_internal(struct __sk_buff *skb, int off, int len, char proto) +static __always_inline int decap_internal(struct __sk_buff *skb, int off, + int len, char proto) { char buf[sizeof(struct v6hdr)]; + struct gre_hdr greh; + struct udphdr udph; int olen = len;
switch (proto) { @@ -259,9 +357,17 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto) break; case IPPROTO_GRE: olen += sizeof(struct gre_hdr); + if (bpf_skb_load_bytes(skb, off + len, &greh, sizeof(greh)) < 0) + return TC_ACT_OK; + if (bpf_ntohs(greh.protocol) == ETH_P_MPLS_UC) + olen += sizeof(mpls_label); break; case IPPROTO_UDP: olen += sizeof(struct udphdr); + if (bpf_skb_load_bytes(skb, off + len, &udph, sizeof(udph)) < 0) + return TC_ACT_OK; + if (bpf_ntohs(udph.dest) == MPLS_OVER_UDP_PORT) + olen += sizeof(mpls_label); break; default: return TC_ACT_OK; @@ -274,7 +380,7 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto) return TC_ACT_OK; }
-static int decap_ipv4(struct __sk_buff *skb) +static __always_inline int decap_ipv4(struct __sk_buff *skb) { struct iphdr iph_outer;
@@ -289,7 +395,7 @@ static int decap_ipv4(struct __sk_buff *skb) iph_outer.protocol); }
-static int decap_ipv6(struct __sk_buff *skb) +static __always_inline int decap_ipv6(struct __sk_buff *skb) { struct ipv6hdr iph_outer;
@@ -302,7 +408,7 @@ static int decap_ipv6(struct __sk_buff *skb) }
SEC("decap") -int decap_f(struct __sk_buff *skb) +static int decap_f(struct __sk_buff *skb) { switch (skb->protocol) { case __bpf_constant_htons(ETH_P_IP): diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh index 3ae54f0..37c479e 100755 --- a/tools/testing/selftests/bpf/test_tc_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh @@ -89,42 +89,44 @@ set -e # no arguments: automated test, run all if [[ "$#" -eq "0" ]]; then echo "ipip" - $0 ipv4 ipip 100 + $0 ipv4 ipip none 100
echo "ip6ip6" - $0 ipv6 ip6tnl 100 + $0 ipv6 ip6tnl none 100
- echo "ip gre" - $0 ipv4 gre 100 + for mac in none mpls ; do + echo "ip gre $mac" + $0 ipv4 gre $mac 100
- echo "ip6 gre" - $0 ipv6 ip6gre 100 + echo "ip6 gre $mac" + $0 ipv6 ip6gre $mac 100
- echo "ip gre gso" - $0 ipv4 gre 2000 + echo "ip gre $mac gso" + $0 ipv4 gre $mac 2000
- echo "ip6 gre gso" - $0 ipv6 ip6gre 2000 + echo "ip6 gre $mac gso" + $0 ipv6 ip6gre $mac 2000
- echo "ip udp" - $0 ipv4 udp 100 + echo "ip udp $mac" + $0 ipv4 udp $mac 100
- echo "ip6 udp" - $0 ipv6 ip6udp 100 + echo "ip6 udp $mac" + $0 ipv6 ip6udp $mac 100
- echo "ip udp gso" - $0 ipv4 udp 2000 + echo "ip udp $mac gso" + $0 ipv4 udp $mac 2000
- echo "ip6 udp gso" - $0 ipv6 ip6udp 2000 + echo "ip6 udp $mac gso" + $0 ipv6 ip6udp $mac 2000 + done
echo "OK. All tests passed" exit 0 fi
-if [[ "$#" -ne "3" ]]; then +if [[ "$#" -ne "4" ]]; then echo "Usage: $0" - echo " or: $0 <ipv4|ipv6> <tuntype> <data_len>" + echo " or: $0 <ipv4|ipv6> <tuntype> <none|mpls> <data_len>" exit 1 fi
@@ -148,9 +150,10 @@ case "$1" in esac
readonly tuntype=$2 -readonly datalen=$3 +readonly mactype=$3 +readonly datalen=$4
-echo "encap ${addr1} to ${addr2}, type ${tuntype}, len ${datalen}" +echo "encap ${addr1} to ${addr2}, tun ${tuntype} mac ${mactype} len ${datalen}"
trap cleanup EXIT
@@ -167,7 +170,7 @@ verify_data ip netns exec "${ns1}" tc qdisc add dev veth1 clsact ip netns exec "${ns1}" tc filter add dev veth1 egress \ bpf direct-action object-file ./test_tc_tunnel.o \ - section "encap_${tuntype}" + section "encap_${tuntype}_${mactype}" echo "test bpf encap without decap (expect failure)" server_listen ! client_connect @@ -176,11 +179,11 @@ server_listen # server is still running # client can connect again
-# Skip tunnel tests for ip6udp. For IPv6, a UDP checksum is required -# and there seems to be no way to tell a fou6 tunnel to allow 0 -# checksums. Accordingly for both these cases, we skip tests against -# tunnel peer, and test encap using BPF decap only. -if [[ "$tuntype" != "ip6udp" ]]; then +# Skip tunnel tests for L2 encap and ip6udp. For IPv6, a UDP checksum +# is required and there seems to be no way to tell a fou6 tunnel to +# allow 0 checksums. Accordingly for both these cases, we skip tests +# against tunnel peer and test using BPF decap only. +if [[ "$mactype" == "none" && "$tuntype" != "ip6udp" ]]; then if [[ "$tuntype" == "udp" ]]; then # Set up fou tunnel. ttype=ipip
On Mon, Apr 1, 2019 at 11:33 AM Alan Maguire <alan.maguire@oracle.com> wrote:
Update test_tc_tunnel to verify adding inner L2 header encapsulation (an MPLS label) works.
Signed-off-by: Alan Maguire <alan.maguire@oracle.com>
@@ -84,23 +95,39 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) return TC_ACT_OK;
olen = sizeof(h_outer.ip);
elen = 0;
nit: could you pick a (slightly) more descriptive name? I don't get what e is an abbreviation for here.
flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4;
if (l2_proto == ETH_P_MPLS_UC) {
elen = sizeof(mpls_label);
flags |= BPF_F_ADJ_ROOM_ENCAP_L2(elen);
}
switch (encap_proto) { case IPPROTO_GRE:
To verify that this L2 encap method is generic, it would be useful to also test IPPROTO_GRE + ETH_P_TEB, if that's not too much work to add.
flags |= BPF_F_ADJ_ROOM_ENCAP_L4_GRE | BPF_F_ADJ_ROOM_FIXED_GSO; olen += sizeof(h_outer.l4hdr.gre);
h_outer.l4hdr.gre.protocol = bpf_htons(ETH_P_IP);
h_outer.l4hdr.gre.protocol = bpf_htons(l2_proto); h_outer.l4hdr.gre.flags = 0; break; case IPPROTO_UDP: flags |= BPF_F_ADJ_ROOM_ENCAP_L4_UDP; olen += sizeof(h_outer.l4hdr.udp);
h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
h_outer.l4hdr.udp.dest = __bpf_constant_htons(cfg_udp_dst); h_outer.l4hdr.udp.check = 0; h_outer.l4hdr.udp.len = bpf_htons(bpf_ntohs(iph_inner.tot_len) +
sizeof(h_outer.l4hdr.udp));
sizeof(h_outer.l4hdr.udp) +
elen);
h_outer.l4hdr.udp.source = __bpf_constant_htons(cfg_udp_src);
switch (l2_proto) {
case ETH_P_IP:
dst = cfg_udp_dst;
break;
case ETH_P_MPLS_UC:
dst = cfg_mplsudp_dst;
break;
}
h_outer.l4hdr.udp.dest = bpf_htons(dst);
nit: more concise:
h_outer.l4hdr.udp.dest = bpf_htons(l2_proto == ETH_P_IP ? cfg_udp_dst : cfg_mplsudp_dst);
break; case IPPROTO_IPIP: break;
@@ -108,6 +135,13 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) return TC_ACT_OK; }
/* add L2 encap (if specified) */
if (l2_proto == ETH_P_MPLS_UC)
__builtin_memcpy((__u8 *)&h_outer + olen, &mpls_label,
sizeof(mpls_label));
nit: no need for memcpy, can cast to __u32 and use regular integer assignment.
olen += elen;
/* add room between mac and network header */ if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags)) return TC_ACT_SHOT;
@@ -124,18 +158,19 @@ static __always_inline int encap_ipv4(struct __sk_buff *skb, __u8 encap_proto) if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen, BPF_F_INVALIDATE_HASH) < 0) return TC_ACT_SHOT;
nit: irrelevant change
-if [[ "$#" -ne "3" ]]; then +if [[ "$#" -ne "4" ]]; then echo "Usage: $0"
echo " or: $0 <ipv4|ipv6> <tuntype> <data_len>"
echo " or: $0 <ipv4|ipv6> <tuntype> <none|mpls> <data_len>" exit 1
fi
@@ -148,9 +150,10 @@ case "$1" in esac
readonly tuntype=$2 -readonly datalen=$3 +readonly mactype=$3 +readonly datalen=$4
-echo "encap ${addr1} to ${addr2}, type ${tuntype}, len ${datalen}" +echo "encap ${addr1} to ${addr2}, tun ${tuntype} mac ${mactype} len ${datalen}"
trap cleanup EXIT
@@ -167,7 +170,7 @@ verify_data ip netns exec "${ns1}" tc qdisc add dev veth1 clsact ip netns exec "${ns1}" tc filter add dev veth1 egress \ bpf direct-action object-file ./test_tc_tunnel.o \
section "encap_${tuntype}"
section "encap_${tuntype}_${mactype}"
echo "test bpf encap without decap (expect failure)" server_listen ! client_connect @@ -176,11 +179,11 @@ server_listen # server is still running # client can connect again
-# Skip tunnel tests for ip6udp. For IPv6, a UDP checksum is required -# and there seems to be no way to tell a fou6 tunnel to allow 0 -# checksums. Accordingly for both these cases, we skip tests against -# tunnel peer, and test encap using BPF decap only. -if [[ "$tuntype" != "ip6udp" ]]; then +# Skip tunnel tests for L2 encap and ip6udp. For IPv6, a UDP checksum +# is required and there seems to be no way to tell a fou6 tunnel to +# allow 0 checksums.
Please update comment for L2 encap: why can we not test with a device for MPLS decap? I am not suggesting adding it if complex. But note that I added the tunnel device decap before the bpf decap especially to have some verification that our logic matches that generated by real tunnel devices. With our own BPF code on both sides, that cannot be tested.
On Mon, Apr 1, 2019 at 11:33 AM Alan Maguire <alan.maguire@oracle.com> wrote:
Extend bpf_skb_adjust_room growth to mark inner MAC header so that L2 encapsulation can be used for tc tunnels.
Patch #1 extends the existing test_tc_tunnel to support UDP encapsulation; later we want to be able to test MPLS over UDP and MPLS over GRE encapsulation.
Patch #2 adds the BPF_F_ADJ_ROOM_ENCAP_L2(len) macro, which allows specification of inner mac length. Other approaches were explored prior to taking this approach. Specifically, I tried automatically computing the inner mac length on the basis of the specified flags (so inner maclen for GRE/IPv4 encap is the len_diff specified to bpf_skb_adjust_room minus GRE + IPv4 header length for example). Problem with this is that we don't know for sure what form of GRE/UDP header we have; is it a full GRE header, or is it a FOU UDP header or generic UDP encap header? My fear here was we'd end up with an explosion of flags.
Agreed.
The other approach tried was to support inner L2 header marking as a separate room adjustment, i.e. adjust for L3/L4 encap, then call bpf_skb_adjust_room for L2 encap. This can be made to work but because it imposed an order on operations, felt a bit clunky.
It seems slightly simpler to me. But this removes one extra call, so is fine, too, given that there is prior art of encoding length fields in flags.
That is, if it works for all cases of L2 encap: by itself, before and after L3 tunneling:
- [eth] {mpls} [ip] [tcp] [payload] - [eth] {ip} {gre} {mpls} [ip] [tcp] [payload] - [eth] {mpls} {ip} {gre} [ip] [tcp] [payload]
Less important, instead of encoding length inside flags, it is arguably cleaner to have a one-bit flag BPF_F_ADJ_ROOM_ENCAP_L2 plus a new argument l2_len.
Patch #3 syncs tools/ bpf.h.
Patch #4 extends the tests again to support MPLSoverGRE and MPLSoverUDP encap, along with existing test coverage.
Alan Maguire (4): selftests_bpf: extend test_tc_tunnel for UDP encap bpf: add layer 2 encap support to bpf_skb_adjust_room bpf: sync bpf.h to tools/ for BPF_F_ADJ_ROOM_ENCAP_L2 selftests_bpf: extend test_tc_tunnel.sh test for L2 encap
include/uapi/linux/bpf.h | 5 + net/core/filter.c | 19 +- tools/include/uapi/linux/bpf.h | 5 + tools/testing/selftests/bpf/progs/test_tc_tunnel.c | 281 ++++++++++++++++----- tools/testing/selftests/bpf/test_tc_tunnel.sh | 105 +++++--- 5 files changed, 318 insertions(+), 97 deletions(-)
-- 1.8.3.1
On Mon, Apr 1, 2019 at 11:33 AM Alan Maguire <alan.maguire@oracle.com> wrote:
Extend bpf_skb_adjust_room growth to mark inner MAC header so that L2 encapsulation can be used for tc tunnels.
Perhaps superfluously, but I forgot to mention earlier during review: super useful extension! Thanks for working on this Alan.
linux-kselftest-mirror@lists.linaro.org