Commit 13e56ec2 authored by Stanislav Fomichev, committed by Alexei Starovoitov

selftests/bpf: use thoff instead of nhoff in BPF flow dissector

We return thoff from the flow dissector, not nhoff. Pass thoff along with
nhoff to the BPF program (initially thoff == nhoff) and expect the flow
dissector to amend and return thoff, not nhoff.

This avoids the confusing situation where, by the time the BPF flow
dissector exits, nhoff == thoff, which doesn't make much sense.
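
For illustration, here is a minimal sketch of a flow dissector program written against the convention this patch establishes: the kernel seeds both nhoff and thoff, and the program advances thoff as it steps over headers so that thoff points at the transport header on return, while nhoff stays at the network header. The program name, the IPv4-only handling, and the section/include details are assumptions made for brevity; the actual selftest (bpf_flow.c) dispatches to per-protocol programs via tail calls.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical minimal sketch, not the selftest itself: parse a plain
 * IPv4 packet and leave flow_keys->thoff at the transport header.
 */
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/if_ether.h>
#include "bpf_helpers.h"

SEC("flow_dissector")
int dissect_ipv4_only(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;
	struct iphdr iph;

	/* On entry, keys->thoff == keys->nhoff == start of the network header. */
	if (bpf_skb_load_bytes(skb, keys->thoff, &iph, sizeof(iph)))
		return BPF_DROP;

	keys->addr_proto = ETH_P_IP;
	keys->ipv4_src = iph.saddr;
	keys->ipv4_dst = iph.daddr;
	keys->ip_proto = iph.protocol;

	/* Step over the IPv4 header; thoff now points at the transport header. */
	keys->thoff += iph.ihl << 2;

	return BPF_OK;
}

char _license[] SEC("license") = "GPL";

Leaving nhoff untouched means that, after the program returns, the kernel can still tell where the network header started, while thoff reports where the transport header begins.
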
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 1b4e5ad5
@@ -783,6 +783,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 		/* Pass parameters to the BPF program */
 		cb->qdisc_cb.flow_keys = &flow_keys;
 		flow_keys.nhoff = nhoff;
+		flow_keys.thoff = nhoff;
 
 		bpf_compute_data_pointers((struct sk_buff *)skb);
 		result = BPF_PROG_RUN(attached, skb);
@@ -70,18 +70,18 @@ static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
 {
 	void *data_end = (void *)(long)skb->data_end;
 	void *data = (void *)(long)skb->data;
-	__u16 nhoff = skb->flow_keys->nhoff;
+	__u16 thoff = skb->flow_keys->thoff;
 	__u8 *hdr;
 
 	/* Verifies this variable offset does not overflow */
-	if (nhoff > (USHRT_MAX - hdr_size))
+	if (thoff > (USHRT_MAX - hdr_size))
 		return NULL;
 
-	hdr = data + nhoff;
+	hdr = data + thoff;
 	if (hdr + hdr_size <= data_end)
 		return hdr;
 
-	if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size))
+	if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
 		return NULL;
 
 	return buffer;
@@ -158,13 +158,13 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 			/* Only inspect standard GRE packets with version 0 */
 			return BPF_OK;
 
-		keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */
+		keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
 		if (GRE_IS_CSUM(gre->flags))
-			keys->nhoff += 4; /* Step over chksum and Padding */
+			keys->thoff += 4; /* Step over chksum and Padding */
 		if (GRE_IS_KEY(gre->flags))
-			keys->nhoff += 4; /* Step over key */
+			keys->thoff += 4; /* Step over key */
 		if (GRE_IS_SEQ(gre->flags))
-			keys->nhoff += 4; /* Step over sequence number */
+			keys->thoff += 4; /* Step over sequence number */
 
 		keys->is_encap = true;
@@ -174,7 +174,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 			if (!eth)
 				return BPF_DROP;
 
-			keys->nhoff += sizeof(*eth);
+			keys->thoff += sizeof(*eth);
 
 			return parse_eth_proto(skb, eth->h_proto);
 		} else {
@@ -191,7 +191,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 		if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
 			return BPF_DROP;
 
-		keys->thoff = keys->nhoff;
 		keys->sport = tcp->source;
 		keys->dport = tcp->dest;
 		return BPF_OK;
@@ -201,7 +200,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 		if (!udp)
 			return BPF_DROP;
 
-		keys->thoff = keys->nhoff;
 		keys->sport = udp->source;
 		keys->dport = udp->dest;
 		return BPF_OK;
@@ -252,8 +250,8 @@ PROG(IP)(struct __sk_buff *skb)
 	keys->ipv4_src = iph->saddr;
 	keys->ipv4_dst = iph->daddr;
 
-	keys->nhoff += iph->ihl << 2;
-	if (data + keys->nhoff > data_end)
+	keys->thoff += iph->ihl << 2;
+	if (data + keys->thoff > data_end)
 		return BPF_DROP;
 
 	if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
@@ -285,7 +283,7 @@ PROG(IPV6)(struct __sk_buff *skb)
 	keys->addr_proto = ETH_P_IPV6;
 	memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
 
-	keys->nhoff += sizeof(struct ipv6hdr);
+	keys->thoff += sizeof(struct ipv6hdr);
 
 	return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -301,7 +299,7 @@ PROG(IPV6OP)(struct __sk_buff *skb)
 	/* hlen is in 8-octets and does not include the first 8 bytes
 	 * of the header
 	 */
-	skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3;
+	skb->flow_keys->thoff += (1 + ip6h->hdrlen) << 3;
 
 	return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -315,7 +313,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
 	if (!fragh)
 		return BPF_DROP;
 
-	keys->nhoff += sizeof(*fragh);
+	keys->thoff += sizeof(*fragh);
 	keys->is_frag = true;
 	if (!(fragh->frag_off & bpf_htons(IP6_OFFSET)))
 		keys->is_first_frag = true;
@@ -341,7 +339,7 @@ PROG(VLAN)(struct __sk_buff *skb)
 	__be16 proto;
 
 	/* Peek back to see if single or double-tagging */
-	if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto,
+	if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
 			       sizeof(proto)))
 		return BPF_DROP;
@@ -354,14 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
 		if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
 			return BPF_DROP;
 
-		keys->nhoff += sizeof(*vlan);
+		keys->thoff += sizeof(*vlan);
 	}
 
 	vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
 	if (!vlan)
 		return BPF_DROP;
 
-	keys->nhoff += sizeof(*vlan);
+	keys->thoff += sizeof(*vlan);
 	/* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
 	if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
 	    vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))