This patch sets network_header and inner_network_header to their respective values during the receive phase of GRO, so that inner_network_header can be used later on in the GRO path. network_header is already set in dev_gro_receive; under encapsulation, inner_network_header is now set as well.
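For context, the skb_gro_network_offset() helper added in include/net/gro.h picks between the outer and inner network offsets without a branch: encap_mark is either 0 or 1, so (encap_mark - 1) is either all-ones or all-zeroes, and the AND/OR pair keeps exactly one of the two offsets. Below is a minimal userspace sketch of the same mask trick (pick_offset() and its arguments are illustrative only, not kernel API):

  /* Standalone illustration (not kernel code) of the branchless select. */
  #include <stdint.h>
  #include <stdio.h>

  /* encap_mark is assumed to be 0 (no tunnel) or 1 (inside a tunnel). */
  static int pick_offset(uint32_t encap_mark, int net_off, int inner_net_off)
  {
          const uint32_t mask = encap_mark - 1;   /* 0xffffffff or 0 */

          return (net_off & mask) | (inner_net_off & ~mask);
  }

  int main(void)
  {
          printf("%d\n", pick_offset(0, 14, 34));  /* no encapsulation -> 14 */
          printf("%d\n", pick_offset(1, 14, 34));  /* encap_mark set   -> 34 */
          return 0;
  }

With encap_mark set to 1 by the tunnel gro_receive callbacks patched below, the helper resolves to the inner network offset; otherwise it falls back to the outer one.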
Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
---
 include/net/gro.h      | 13 +++++++++++--
 net/8021q/vlan_core.c  |  5 +++++
 net/ethernet/eth.c     |  1 +
 net/ipv4/af_inet.c     |  9 ++-------
 net/ipv4/gre_offload.c |  1 +
 net/ipv6/ip6_offload.c | 12 +++++-------
 6 files changed, 25 insertions(+), 16 deletions(-)
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 7515e6bcbb7d..db27b6851360 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -441,6 +441,7 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 	pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
 					    ipv6_gro_receive, inet_gro_receive,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9334b563a88b..09ab9ac4420b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1567,10 +1567,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 	NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
 	NAPI_GRO_CB(skb)->flush |= flush;
-	skb_set_network_header(skb, off);
-	/* The above will be needed by the transport layer if there is one
-	 * immediately following this IP hdr.
-	 */
 	/* Note : No need to call skb_gro_postpull_rcsum() here,
 	 * as we already checked checksum over ipv4 header was 0
@@ -1596,6 +1592,7 @@ static struct sk_buff *ipip_gro_receive(struct list_head *head,
 	}
 	NAPI_GRO_CB(skb)->encap_mark = 1;
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 	return inet_gro_receive(head, skb);
 }
@@ -1648,10 +1645,8 @@ int inet_gro_complete(struct sk_buff *skb, int prior_off, int nhoff)
 	int proto = iph->protocol;
 	int err = -ENOSYS;
-	if (skb->encapsulation) {
+	if (skb->encapsulation)
 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
-		skb_set_inner_network_header(skb, nhoff);
-	}
 	iph_set_totlen(iph, skb->len - nhoff);
 	csum_replace2(&iph->check, totlen, iph->tot_len);
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index d4520c3f7c09..bfc3a0cbb3a3 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -224,6 +224,7 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 	flush = 0;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e3a05b84c76a..29601be9fa90 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -67,7 +67,7 @@ static int ipv6_gro_pull_exthdrs(struct sk_buff *skb, int off, int proto)
 		off += len;
 	}
-	skb_gro_pull(skb, off - skb_network_offset(skb));
+	skb_gro_pull(skb, off - skb_gro_network_offset(skb));
 	return proto;
 }
@@ -236,8 +236,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
 	if (unlikely(!iph))
 		goto out;
-	skb_set_network_header(skb, off);
-
 	flush += ntohs(iph->payload_len) != skb->len - hlen;
 	proto = iph->nexthdr;
@@ -259,7 +257,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
 	NAPI_GRO_CB(skb)->proto = proto;
 	flush--;
-	nlen = skb_network_header_len(skb);
+	nlen = skb_gro_offset(skb) - off;
 	list_for_each_entry(p, head, list) {
 		const struct ipv6hdr *iph2;
@@ -327,6 +325,7 @@ static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
 	}
 	NAPI_GRO_CB(skb)->encap_mark = 1;
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 	return ipv6_gro_receive(head, skb);
 }
@@ -342,6 +341,7 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
 	}
 	NAPI_GRO_CB(skb)->encap_mark = 1;
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 	return inet_gro_receive(head, skb);
 }
@@ -355,10 +355,8 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb,
 	u32 payload_len;
 	int nhlen;
-	if (skb->encapsulation) {
+	if (skb->encapsulation)
 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
-		skb_set_inner_network_header(skb, nhoff);
-	}
 	payload_len = skb->len - nhoff - sizeof(*iph);
 	if (unlikely(payload_len > IPV6_MAXPLEN)) {
diff --git a/include/net/gro.h b/include/net/gro.h
index cb7282bf3d63..923cbcc4c2fa 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -171,12 +171,21 @@ static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
 	return ptr;
 }
+static inline int skb_gro_network_offset(const struct sk_buff *skb)
+{
+	const u32 mask = NAPI_GRO_CB(skb)->encap_mark - 1;
+
+	return (skb_network_offset(skb) & mask) | (skb_inner_network_offset(skb) & ~mask);
+}
+
 static inline void *skb_gro_network_header(const struct sk_buff *skb)
 {
+	const int offset = skb_gro_network_offset(skb);
+
 	if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
-		return skb_gro_header_fast(skb, skb_network_offset(skb));
+		return skb_gro_header_fast(skb, offset);
-	return skb_network_header(skb);
+	return skb->data + offset;
 }
 static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 247704cf70af..a61bfcff6250 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -478,6 +478,11 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
 	if (unlikely(!vhdr))
 		goto out;
+	if (NAPI_GRO_CB(skb)->encap_mark)
+		skb_set_inner_network_header(skb, hlen);
+	else
+		skb_set_network_header(skb, hlen);
+
 	type = vhdr->h_vlan_encapsulated_proto;
 	ptype = gro_find_receive_by_type(type);