Commit 41ecc3d3 authored by David S. Miller

Merge branch 'flow-dissector-features'

Tom Herbert says:

====================
flow_dissector: Parameterize dissection and other features

This patch set adds some new capabilities to flow_dissector:

- Add flags to flow dissector functions to control dissection (see
  the sketch below this list)
  - Flag to stop dissection once the L3 header has been parsed
    (don't dissect L4)
  - Flag to stop dissection when encapsulation is detected
  - Flag to parse the first fragment of a fragmented packet, which
    may still provide L4 ports
- Add new reporting in key_control
  - Packet is a fragment
  - Packet is a first fragment
  - Packet has encapsulation
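
As a rough illustration of how a caller might combine the new flags
with the new key_control bits (the flag and bit names come from this
series; the wrapper function itself is hypothetical):

  /* Hypothetical caller: dissect only up to L3 and use the new
   * key_control bits to reject non-first fragments.
   */
  static bool example_l3_classify(const struct sk_buff *skb)
  {
          struct flow_keys keys;

          if (!skb_flow_dissect_flow_keys(skb, &keys,
                                          FLOW_DISSECTOR_F_STOP_AT_L3))
                  return false;

          /* Non-first fragments carry no L4 ports */
          if (keys.control.is_fragment && !keys.control.first_frag)
                  return false;

          return true;
  }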

Also:
  - Make __skb_set_sw_hash a general function
  - Create functions to get a flow hash based on flowi4 or flowi6
    structures without a reference to an skbuff (see the sketch
    after this list)
  - Ignore the flow dissector return value in ___skb_get_hash; just
    use whatever key fields were found to compute the hash
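
And a sketch of getting a flow hash straight from a flowi4 with no
skbuff involved (get_hash_from_flowi4() and __get_hash_from_flowi4()
are the new helpers; the caller below is made up):

  /* Hypothetical caller: hash a flow described by a routing flowi4. */
  static __u32 example_flowi4_hash(struct flowi4 *fl4)
  {
          /* Fills a local flow_keys from fl4 and feeds it to
           * flow_hash_from_keys().
           */
          return get_hash_from_flowi4(fl4);
  }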

Tested:

Ran 200 netperf TCP_RR instances for IPv6 and IPv4 and did not see
any regression. Ran UDP_RR with a 10000 byte request and response
size for IPv4 and IPv6; no regression was observed there either, but
I did see better performance with IPv6 because flow labels are now
used for the L4 hash.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d3d11fe0 6db61d79
@@ -3095,7 +3095,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
int noff, proto = -1;
if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
return skb_flow_dissect_flow_keys(skb, fk);
return skb_flow_dissect_flow_keys(skb, fk, 0);
fk->ports.ports = 0;
noff = skb_network_offset(skb);
@@ -177,7 +177,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int res, i;
enic = netdev_priv(dev);
res = skb_flow_dissect_flow_keys(skb, &keys);
res = skb_flow_dissect_flow_keys(skb, &keys, 0);
if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
(keys.basic.ip_proto != IPPROTO_TCP &&
keys.basic.ip_proto != IPPROTO_UDP))
@@ -239,7 +239,7 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
struct flow_keys flow;
int data_len;
if (!skb_flow_dissect_flow_keys(skb, &flow) ||
if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
!(flow.basic.n_proto == htons(ETH_P_IP) ||
flow.basic.n_proto == htons(ETH_P_IPV6)))
return false;
@@ -937,14 +937,90 @@ enum pkt_hash_types {
PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
};
static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
static inline void skb_clear_hash(struct sk_buff *skb)
{
skb->l4_hash = (type == PKT_HASH_TYPE_L4);
skb->hash = 0;
skb->sw_hash = 0;
skb->l4_hash = 0;
}
static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
if (!skb->l4_hash)
skb_clear_hash(skb);
}
static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
skb->l4_hash = is_l4;
skb->sw_hash = is_sw;
skb->hash = hash;
}
static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
/* Used by drivers to set hash from HW */
__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}
static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
__skb_set_hash(skb, hash, true, is_l4);
}
void __skb_get_hash(struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
void *data, int hlen_proto);
static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
int thoff, u8 ip_proto)
{
return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
void *data, __be16 proto, int nhoff, int hlen,
unsigned int flags);
static inline bool skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container, unsigned int flags)
{
return __skb_flow_dissect(skb, flow_dissector, target_container,
NULL, 0, 0, 0, flags);
}
static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
struct flow_keys *flow,
unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
NULL, 0, 0, 0, flags);
}
static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
void *data, __be16 proto,
int nhoff, int hlen,
unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
data, proto, nhoff, hlen, flags);
}
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -957,8 +1033,12 @@ __u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6);
static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
{
if (!skb->l4_hash && !skb->sw_hash)
__skb_get_hash_flowi6(skb, fl6);
if (!skb->l4_hash && !skb->sw_hash) {
struct flow_keys keys;
__skb_set_sw_hash(skb, __get_hash_from_flowi6(fl6, &keys),
flow_keys_have_l4(&keys));
}
return skb->hash;
}
@@ -967,8 +1047,12 @@ __u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl);
static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
{
if (!skb->l4_hash && !skb->sw_hash)
__skb_get_hash_flowi4(skb, fl4);
if (!skb->l4_hash && !skb->sw_hash) {
struct flow_keys keys;
__skb_set_sw_hash(skb, __get_hash_from_flowi4(fl4, &keys),
flow_keys_have_l4(&keys));
}
return skb->hash;
}
@@ -980,19 +1064,6 @@ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
return skb->hash;
}
static inline void skb_clear_hash(struct sk_buff *skb)
{
skb->hash = 0;
skb->sw_hash = 0;
skb->l4_hash = 0;
}
static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
if (!skb->l4_hash)
skb_clear_hash(skb);
}
static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
to->hash = from->hash;
@@ -1978,7 +2049,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
else if (skb_flow_dissect_flow_keys(skb, &keys))
else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
@@ -10,6 +10,7 @@
#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/atomic.h>
#include <net/flow_dissector.h>
/*
* ifindex generation is per-net namespace, and loopback is
@@ -243,4 +244,22 @@ void flow_cache_flush(struct net *net);
void flow_cache_flush_deferred(struct net *net);
extern atomic_t flow_cache_genid;
__u32 __get_hash_from_flowi6(struct flowi6 *fl6, struct flow_keys *keys);
static inline __u32 get_hash_from_flowi6(struct flowi6 *fl6)
{
struct flow_keys keys;
return __get_hash_from_flowi6(fl6, &keys);
}
__u32 __get_hash_from_flowi4(struct flowi4 *fl4, struct flow_keys *keys);
static inline __u32 get_hash_from_flowi4(struct flowi4 *fl4)
{
struct flow_keys keys;
return __get_hash_from_flowi4(fl4, &keys);
}
#endif
@@ -2,7 +2,6 @@
#define _NET_FLOW_DISSECTOR_H
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/in6.h>
#include <uapi/linux/if_ether.h>
@@ -13,6 +12,9 @@
struct flow_dissector_key_control {
u16 thoff;
u16 addr_type;
u32 is_fragment:1;
u32 first_frag:1;
u32 encapsulation:1;
};
/**
@@ -123,6 +125,11 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MAX,
};
#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0)
#define FLOW_DISSECTOR_F_STOP_AT_L3 BIT(1)
#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(2)
#define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(3)
struct flow_dissector_key {
enum flow_dissector_key_id key_id;
size_t offset; /* offset of struct flow_dissector_key_*
@@ -134,23 +141,6 @@ struct flow_dissector {
unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
};
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
void *data, __be16 proto, int nhoff, int hlen);
static inline bool skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container)
{
return __skb_flow_dissect(skb, flow_dissector, target_container,
NULL, 0, 0, 0);
}
struct flow_keys {
struct flow_dissector_key_control control;
#define FLOW_KEYS_HASH_START_FIELD basic
@@ -170,38 +160,6 @@ __be32 flow_get_u32_dst(const struct flow_keys *flow);
extern struct flow_dissector flow_keys_dissector;
extern struct flow_dissector flow_keys_buf_dissector;
static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
struct flow_keys *flow)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
NULL, 0, 0, 0);
}
static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
void *data, __be16 proto,
int nhoff, int hlen)
{
memset(flow, 0, sizeof(*flow));
return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
data, proto, nhoff, hlen);
}
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
void *data, int hlen_proto);
static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
int thoff, u8 ip_proto)
{
return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}
u32 flow_hash_from_keys(struct flow_keys *keys);
void __skb_get_hash(struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen);
/* struct flow_keys_digest:
*
* This structure is used to hold a digest of the full flow keys. This is a
@@ -217,4 +175,11 @@ struct flow_keys_digest {
void make_flow_keys_digest(struct flow_keys_digest *digest,
const struct flow_keys *flow);
static inline bool flow_keys_have_l4(struct flow_keys *keys)
{
return (keys->ports.ports || keys->tags.flow_label);
}
u32 flow_hash_from_keys(struct flow_keys *keys);
#endif
@@ -22,6 +22,7 @@
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>
@@ -509,3 +510,38 @@ void flow_cache_fini(struct net *net)
fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);
__u32 __get_hash_from_flowi6(struct flowi6 *fl6, struct flow_keys *keys)
{
memset(keys, 0, sizeof(*keys));
memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
sizeof(keys->addrs.v6addrs.src));
memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
sizeof(keys->addrs.v6addrs.dst));
keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
keys->ports.src = fl6->fl6_sport;
keys->ports.dst = fl6->fl6_dport;
keys->keyid.keyid = fl6->fl6_gre_key;
keys->tags.flow_label = (__force u32)fl6->flowlabel;
keys->basic.ip_proto = fl6->flowi6_proto;
return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);
__u32 __get_hash_from_flowi4(struct flowi4 *fl4, struct flow_keys *keys)
{
memset(keys, 0, sizeof(*keys));
keys->addrs.v4addrs.src = fl4->saddr;
keys->addrs.v4addrs.dst = fl4->daddr;
keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
keys->ports.src = fl4->fl4_sport;
keys->ports.dst = fl4->fl4_dport;
keys->keyid.keyid = fl4->fl4_gre_key;
keys->basic.ip_proto = fl4->flowi4_proto;
return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi4);
@@ -121,7 +121,8 @@ EXPORT_SYMBOL(__skb_flow_get_ports);
bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
void *data, __be16 proto, int nhoff, int hlen)
void *data, __be16 proto, int nhoff, int hlen,
unsigned int flags)
{
struct flow_dissector_key_control *key_control;
struct flow_dissector_key_basic *key_basic;
@@ -130,6 +131,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_keyid *key_keyid;
u8 ip_proto = 0;
bool ret = false;
if (!data) {
data = skb->data;
@@ -171,12 +173,10 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
ip:
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph || iph->ihl < 5)
return false;
goto out_bad;
nhoff += iph->ihl * 4;
ip_proto = iph->protocol;
if (ip_is_fragment(iph))
ip_proto = 0;
if (!skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS))
@@ -187,6 +187,22 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
memcpy(&key_addrs->v4addrs, &iph->saddr,
sizeof(key_addrs->v4addrs));
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
if (ip_is_fragment(iph)) {
key_control->is_fragment = 1;
if (iph->frag_off & htons(IP_OFFSET)) {
goto out_good;
} else {
key_control->first_frag = 1;
if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
goto out_good;
}
}
if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
goto out_good;
break;
}
case htons(ETH_P_IPV6): {
@@ -197,7 +213,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
ipv6:
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph)
return false;
goto out_bad;
ip_proto = iph->nexthdr;
nhoff += sizeof(struct ipv6hdr);
@@ -223,8 +239,13 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
target_container);
key_tags->flow_label = ntohl(flow_label);
}
if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
goto out_good;
}
if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
goto out_good;
break;
}
case htons(ETH_P_8021AD):
@@ -234,7 +255,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
if (!vlan)
return false;
goto out_bad;
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_VLANID)) {
@@ -256,7 +277,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
} *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
goto out_bad;
proto = hdr->proto;
nhoff += PPPOE_SES_HLEN;
switch (proto) {
@@ -265,7 +286,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
case htons(PPP_IPV6):
goto ipv6;
default:
return false;
goto out_bad;
}
}
case htons(ETH_P_TIPC): {
@@ -275,9 +296,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
} *hdr, _hdr;
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
key_basic->n_proto = proto;
key_control->thoff = (u16)nhoff;
goto out_bad;
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
@@ -287,7 +306,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
key_addrs->tipcaddrs.srcnode = hdr->srcnode;
key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
}
return true;
goto out_good;
}
case htons(ETH_P_MPLS_UC):
@@ -297,7 +316,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
hlen, &_hdr);
if (!hdr)
return false;
goto out_bad;
if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
@@ -310,21 +329,17 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
htonl(MPLS_LS_LABEL_MASK);
}
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
key_control->thoff = (u16)nhoff;
return true;
goto out_good;
}
return true;
goto out_good;
}
case htons(ETH_P_FCOE):
key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
/* fall through */
default:
return false;
goto out_bad;
}
ip_proto_again:
@@ -337,7 +352,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
goto out_bad;
/*
* Only look inside GRE if version zero and no
* routing
@@ -357,7 +372,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
data, hlen, &_keyid);
if (!keyid)
return false;
goto out_bad;
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_GRE_KEYID)) {
@@ -378,10 +393,15 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
sizeof(_eth),
data, hlen, &_eth);
if (!eth)
return false;
goto out_bad;
proto = eth->h_proto;
nhoff += sizeof(*eth);
}
key_control->encapsulation = 1;
if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
goto out_good;
goto again;
}
case NEXTHDR_HOP:
@@ -395,18 +415,53 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
data, hlen, &_opthdr);
if (!opthdr)
return false;
goto out_bad;
ip_proto = opthdr[0];
nhoff += (opthdr[1] + 1) << 3;
goto ip_proto_again;
}
case NEXTHDR_FRAGMENT: {
struct frag_hdr _fh, *fh;
if (proto != htons(ETH_P_IPV6))
break;
fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
data, hlen, &_fh);
if (!fh)
goto out_bad;
key_control->is_fragment = 1;
nhoff += sizeof(_fh);
if (!(fh->frag_off & htons(IP6_OFFSET))) {
key_control->first_frag = 1;
if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
ip_proto = fh->nexthdr;
goto ip_proto_again;
}
}
goto out_good;
}
case IPPROTO_IPIP:
proto = htons(ETH_P_IP);
key_control->encapsulation = 1;
if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
goto out_good;
goto ip;
case IPPROTO_IPV6:
proto = htons(ETH_P_IPV6);
key_control->encapsulation = 1;
if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
goto out_good;
goto ipv6;
case IPPROTO_MPLS:
proto = htons(ETH_P_MPLS_UC);
@@ -415,10 +470,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
break;
}
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
key_control->thoff = (u16)nhoff;
if (skb_flow_dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS)) {
key_ports = skb_flow_dissector_target(flow_dissector,
@@ -428,7 +479,15 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
data, hlen);
}
return true;
out_good:
ret = true;
out_bad:
key_basic->n_proto = proto;
key_basic->ip_proto = ip_proto;
key_control->thoff = (u16)nhoff;
return ret;
}
EXPORT_SYMBOL(__skb_flow_dissect);
@@ -557,8 +616,8 @@ EXPORT_SYMBOL(flow_hash_from_keys);
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
struct flow_keys *keys, u32 keyval)
{
if (!skb_flow_dissect_flow_keys(skb, keys))
return 0;
skb_flow_dissect_flow_keys(skb, keys,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
return __flow_hash_from_keys(keys, keyval);
}
@@ -590,15 +649,6 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
}
EXPORT_SYMBOL(make_flow_keys_digest);
static inline void __skb_set_sw_hash(struct sk_buff *skb, u32 hash,
struct flow_keys *keys)
{
if (keys->ports.ports)
skb->l4_hash = 1;
skb->sw_hash = 1;
skb->hash = hash;
}
/**
* __skb_get_hash: calculate a flow hash
* @skb: sk_buff to calculate flow hash from
@@ -611,15 +661,11 @@ static inline void __skb_set_sw_hash(struct sk_buff *skb, u32 hash,
void __skb_get_hash(struct sk_buff *skb)
{
struct flow_keys keys;
u32 hash;
__flow_hash_secret_init();
hash = ___skb_get_hash(skb, &keys, hashrnd);
if (!hash)
return;
__skb_set_sw_hash(skb, hash, &keys);
__skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
@@ -648,7 +694,8 @@ __u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
keys.tags.flow_label = (__force u32)fl6->flowlabel;
keys.basic.ip_proto = fl6->flowi6_proto;
__skb_set_sw_hash(skb, flow_hash_from_keys(&keys), &keys);
__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
flow_keys_have_l4(&keys));
return skb->hash;
}
@@ -668,7 +715,8 @@ __u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
keys.keyid.keyid = fl4->fl4_gre_key;
keys.basic.ip_proto = fl4->flowi4_proto;
__skb_set_sw_hash(skb, flow_hash_from_keys(&keys), &keys);
__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
flow_keys_have_l4(&keys));
return skb->hash;
}
@@ -733,7 +781,7 @@ u32 skb_get_poff(const struct sk_buff *skb)
{
struct flow_keys keys;
if (!skb_flow_dissect_flow_keys(skb, &keys))
if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
return 0;
return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
@@ -132,7 +132,7 @@ u32 eth_get_headlen(void *data, unsigned int len)
/* parse any remaining L2/L3 headers, check for L4 */
if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto,
sizeof(*eth), len))
sizeof(*eth), len, 0))
return max_t(u32, keys.control.thoff, sizeof(*eth));
/* parse for any L4 headers */
@@ -301,7 +301,7 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
keymask = f->keymask;
if (keymask & FLOW_KEYS_NEEDED)
skb_flow_dissect_flow_keys(skb, &flow_keys);
skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
for (n = 0; n < f->nkeys; n++) {
key = ffs(keymask) - 1;
@@ -129,7 +129,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
* so do it rather here.
*/
skb_key.basic.n_proto = skb->protocol;
skb_flow_dissect(skb, &head->dissector, &skb_key);
skb_flow_dissect(skb, &head->dissector, &skb_key, 0);
fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
@@ -170,13 +170,13 @@ static bool choke_match_flow(struct sk_buff *skb1,
if (!choke_skb_cb(skb1)->keys_valid) {
choke_skb_cb(skb1)->keys_valid = 1;
skb_flow_dissect_flow_keys(skb1, &temp);
skb_flow_dissect_flow_keys(skb1, &temp, 0);
make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
}
if (!choke_skb_cb(skb2)->keys_valid) {
choke_skb_cb(skb2)->keys_valid = 1;
skb_flow_dissect_flow_keys(skb2, &temp);
skb_flow_dissect_flow_keys(skb2, &temp, 0);
make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
}