Commit 08617f47 authored by David S. Miller

Merge branch 'lwt-ipv6'

Jiri Benc says:

====================
lwtunnel: per route ipv6 support for vxlan

v3: Moved LWTUNNEL_ENCAP_IP6 definition in patch 13.
v2: Fixed issues in patch 4 pointed out by Alexei.

This series enables IPv6 tunnels based on lwtunnel infrastructure. Only
vxlan is supported for now.

Tested in all combinations of IPv4 over IPv6, IPv6 over IPv4 and IPv6 over
IPv6.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 938049e1 32a2b002
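
The diffs below rework struct ip_tunnel_key around a per-family address union and drop the address-family argument from skb_tunnel_info(). As a rough illustration of what the reworked metadata API looks like to a consumer (a hedged sketch only, not code from this series; log_tunnel_remote() is a hypothetical helper):

#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/printk.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>

/* Hypothetical consumer sketch: with the per-family union in
 * struct ip_tunnel_key, callers read the member matching the
 * tunnel's address family instead of fixed ipv4_* fields. */
static void log_tunnel_remote(struct sk_buff *skb, unsigned short family)
{
    struct ip_tunnel_info *info = skb_tunnel_info(skb); /* no family argument anymore */

    if (!info)
        return;

    if (family == AF_INET)
        pr_debug("tunnel remote %pI4\n", &info->key.u.ipv4.dst);
    else
        pr_debug("tunnel remote %pI6c\n", &info->key.u.ipv6.dst);
}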
@@ -295,7 +295,6 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
         rth->rt_uses_gateway = 0;
         INIT_LIST_HEAD(&rth->rt_uncached);
         rth->rt_uncached_list = NULL;
-        rth->rt_lwtstate = NULL;
     }
     return rth;
......
@@ -236,7 +236,7 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
     hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
         if (inet_sk(vs->sock->sk)->inet_sport == port &&
-            inet_sk(vs->sock->sk)->sk.sk_family == family &&
+            vxlan_get_sk_family(vs) == family &&
             vs->flags == flags)
             return vs;
     }
@@ -625,7 +625,7 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
     struct net_device *dev;
     struct sock *sk = vs->sock->sk;
     struct net *net = sock_net(sk);
-    sa_family_t sa_family = sk->sk_family;
+    sa_family_t sa_family = vxlan_get_sk_family(vs);
     __be16 port = inet_sk(sk)->inet_sport;
     int err;
@@ -650,7 +650,7 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
     struct net_device *dev;
     struct sock *sk = vs->sock->sk;
     struct net *net = sock_net(sk);
-    sa_family_t sa_family = sk->sk_family;
+    sa_family_t sa_family = vxlan_get_sk_family(vs);
     __be16 port = inet_sk(sk)->inet_sport;

     rcu_read_lock();
@@ -1269,17 +1269,27 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
     }

     if (vxlan_collect_metadata(vs)) {
-        const struct iphdr *iph = ip_hdr(skb);
-
         tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
         if (!tun_dst)
             goto drop;

         info = &tun_dst->u.tun_info;
-        info->key.ipv4_src = iph->saddr;
-        info->key.ipv4_dst = iph->daddr;
-        info->key.ipv4_tos = iph->tos;
-        info->key.ipv4_ttl = iph->ttl;
+        if (vxlan_get_sk_family(vs) == AF_INET) {
+            const struct iphdr *iph = ip_hdr(skb);
+
+            info->key.u.ipv4.src = iph->saddr;
+            info->key.u.ipv4.dst = iph->daddr;
+            info->key.tos = iph->tos;
+            info->key.ttl = iph->ttl;
+        } else {
+            const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+            info->key.u.ipv6.src = ip6h->saddr;
+            info->key.u.ipv6.dst = ip6h->daddr;
+            info->key.tos = ipv6_get_dsfield(ip6h);
+            info->key.ttl = ip6h->hop_limit;
+        }
         info->key.tp_src = udp_hdr(skb)->source;
         info->key.tp_dst = udp_hdr(skb)->dest;
@@ -1894,6 +1904,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
     struct ip_tunnel_info *info;
     struct vxlan_dev *vxlan = netdev_priv(dev);
     struct sock *sk = vxlan->vn_sock->sock->sk;
+    unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
     struct rtable *rt = NULL;
     const struct iphdr *old_iph;
     struct flowi4 fl4;
@@ -1908,8 +1919,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
     int err;
     u32 flags = vxlan->flags;

-    /* FIXME: Support IPv6 */
-    info = skb_tunnel_info(skb, AF_INET);
+    info = skb_tunnel_info(skb);

     if (rdst) {
         dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
@@ -1924,8 +1934,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
         dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
         vni = be64_to_cpu(info->key.tun_id);
-        remote_ip.sin.sin_family = AF_INET;
-        remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
+        remote_ip.sa.sa_family = family;
+        if (family == AF_INET)
+            remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
+        else
+            remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
         dst = &remote_ip;
     }
@@ -1951,23 +1964,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
     src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
                                  vxlan->cfg.port_max, true);

+    if (info) {
+        if (info->key.tun_flags & TUNNEL_CSUM)
+            flags |= VXLAN_F_UDP_CSUM;
+        else
+            flags &= ~VXLAN_F_UDP_CSUM;
+
+        ttl = info->key.ttl;
+        tos = info->key.tos;
+
+        if (info->options_len)
+            md = ip_tunnel_info_opts(info, sizeof(*md));
+    } else {
+        md->gbp = skb->mark;
+    }
+
     if (dst->sa.sa_family == AF_INET) {
-        if (info) {
-            if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
-                df = htons(IP_DF);
-            if (info->key.tun_flags & TUNNEL_CSUM)
-                flags |= VXLAN_F_UDP_CSUM;
-            else
-                flags &= ~VXLAN_F_UDP_CSUM;
-            ttl = info->key.ipv4_ttl;
-            tos = info->key.ipv4_tos;
-            if (info->options_len)
-                md = ip_tunnel_info_opts(info, sizeof(*md));
-        } else {
-            md->gbp = skb->mark;
-        }
+        if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+            df = htons(IP_DF);

         memset(&fl4, 0, sizeof(fl4));
         fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2025,7 +2039,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
     } else {
         struct dst_entry *ndst;
         struct flowi6 fl6;
-        u32 flags;
+        u32 rt6i_flags;

         memset(&fl6, 0, sizeof(fl6));
         fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
@@ -2050,9 +2064,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
         }

         /* Bypass encapsulation if the destination is local */
-        flags = ((struct rt6_info *)ndst)->rt6i_flags;
-        if (flags & RTF_LOCAL &&
-            !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+        rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+        if (rt6i_flags & RTF_LOCAL &&
+            !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
             struct vxlan_dev *dst_vxlan;

             dst_release(ndst);
@@ -2066,12 +2080,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
         }

         ttl = ttl ? : ip6_dst_hoplimit(ndst);
-        md->gbp = skb->mark;
-
         err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
                               0, ttl, src_port, dst_port, htonl(vni << 8), md,
                               !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                              vxlan->flags);
+                              flags);
 #endif
     }
@@ -2104,8 +2116,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
     struct vxlan_rdst *rdst, *fdst = NULL;
     struct vxlan_fdb *f;

-    /* FIXME: Support IPv6 */
-    info = skb_tunnel_info(skb, AF_INET);
+    info = skb_tunnel_info(skb);

     skb_reset_mac_header(skb);
     eth = eth_hdr(skb);
@@ -2390,7 +2401,7 @@ void vxlan_get_rx_port(struct net_device *dev)
     for (i = 0; i < PORT_HASH_SIZE; ++i) {
         hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
             port = inet_sk(vs->sock->sk)->inet_sport;
-            sa_family = vs->sock->sk->sk_family;
+            sa_family = vxlan_get_sk_family(vs);
             dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
                                                 port);
         }
......
@@ -44,6 +44,7 @@ struct dst_entry {
 #else
     void            *__pad1;
 #endif
+    struct lwtunnel_state   *lwtstate;
     int         (*input)(struct sk_buff *);
     int         (*output)(struct sock *sk, struct sk_buff *skb);
@@ -89,7 +90,7 @@ struct dst_entry {
      * (L1_CACHE_SIZE would be too much)
      */
 #ifdef CONFIG_64BIT
-    long            __pad_to_align_refcnt[2];
+    long            __pad_to_align_refcnt[1];
 #endif
     /*
      * __refcnt wants to be on a different cache line from
......
@@ -23,22 +23,17 @@ static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
     return NULL;
 }

-static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb,
-                                                     int family)
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
 {
     struct metadata_dst *md_dst = skb_metadata_dst(skb);
-    struct rtable *rt;
+    struct dst_entry *dst;

     if (md_dst)
         return &md_dst->u.tun_info;

-    switch (family) {
-    case AF_INET:
-        rt = (struct rtable *)skb_dst(skb);
-        if (rt && rt->rt_lwtstate)
-            return lwt_tun_info(rt->rt_lwtstate);
-        break;
-    }
+    dst = skb_dst(skb);
+    if (dst && dst->lwtstate)
+        return lwt_tun_info(dst->lwtstate);

     return NULL;
 }
......
@@ -130,6 +130,7 @@ struct flowi6 {
 #define flowi6_proto        __fl_common.flowic_proto
 #define flowi6_flags        __fl_common.flowic_flags
 #define flowi6_secid        __fl_common.flowic_secid
+#define flowi6_tun_key      __fl_common.flowic_tun_key
     struct in6_addr     daddr;
     struct in6_addr     saddr;
     __be32          flowlabel;
......
@@ -133,7 +133,6 @@ struct rt6_info {
     /* more non-fragment space at head required */
     unsigned short          rt6i_nfheader_len;
     u8              rt6i_protocol;
-    struct lwtunnel_state       *rt6i_lwtstate;
 };

 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
......
@@ -23,20 +23,32 @@
 #define IPTUNNEL_ERR_TIMEO  (30*HZ)

 /* Used to memset ip_tunnel padding. */
-#define IP_TUNNEL_KEY_SIZE                  \
-    (offsetof(struct ip_tunnel_key, tp_dst) +       \
-     FIELD_SIZEOF(struct ip_tunnel_key, tp_dst))
+#define IP_TUNNEL_KEY_SIZE  offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD  offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN              \
+    (FIELD_SIZEOF(struct ip_tunnel_key, u) -        \
+     FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))

 struct ip_tunnel_key {
     __be64          tun_id;
-    __be32          ipv4_src;
-    __be32          ipv4_dst;
+    union {
+        struct {
+            __be32  src;
+            __be32  dst;
+        } ipv4;
+        struct {
+            struct in6_addr src;
+            struct in6_addr dst;
+        } ipv6;
+    } u;
     __be16          tun_flags;
-    __u8            ipv4_tos;
-    __u8            ipv4_ttl;
+    u8          tos;        /* TOS for IPv4, TC for IPv6 */
+    u8          ttl;        /* TTL for IPv4, HL for IPv6 */
     __be16          tp_src;
     __be16          tp_dst;
-} __packed __aligned(4);    /* Minimize padding. */
+};

 /* Indicates whether the tunnel info structure represents receive
  * or transmit tunnel parameters.
@@ -64,8 +76,8 @@ struct ip_tunnel_6rd_parm {
 #endif

 struct ip_tunnel_encap {
-    __u16           type;
-    __u16           flags;
+    u16         type;
+    u16         flags;
     __be16          sport;
     __be16          dport;
 };
@@ -95,8 +107,8 @@ struct ip_tunnel {
                  * arrived */

     /* These four fields used only by GRE */
-    __u32       i_seqno;    /* The last seen seqno  */
-    __u32       o_seqno;    /* The last output seqno */
+    u32     i_seqno;    /* The last seen seqno  */
+    u32     o_seqno;    /* The last output seqno */
     int     tun_hlen;   /* Precalculated header length */
     int     mlink;
@@ -179,10 +191,12 @@ static inline void __ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
                                          const void *opts, u8 opts_len)
 {
     tun_info->key.tun_id = tun_id;
-    tun_info->key.ipv4_src = saddr;
-    tun_info->key.ipv4_dst = daddr;
-    tun_info->key.ipv4_tos = tos;
-    tun_info->key.ipv4_ttl = ttl;
+    tun_info->key.u.ipv4.src = saddr;
+    tun_info->key.u.ipv4.dst = daddr;
+    memset((unsigned char *)&tun_info->key + IP_TUNNEL_KEY_IPV4_PAD,
+           0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+    tun_info->key.tos = tos;
+    tun_info->key.ttl = ttl;
     tun_info->key.tun_flags = tun_flags;

     /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
@@ -273,8 +287,8 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
-                  __be32 src, __be32 dst, __u8 proto,
-                  __u8 tos, __u8 ttl, __be16 df, bool xnet);
+                  __be32 src, __be32 dst, u8 proto,
+                  u8 tos, u8 ttl, __be16 df, bool xnet);
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
                                          int gso_type_mask);
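
The new IP_TUNNEL_KEY_IPV4_PAD/IP_TUNNEL_KEY_IPV4_PAD_LEN macros and the memset() added to __ip_tunnel_info_init() above leave the unused IPv6 tail of the address union in a deterministic, zeroed state when only the IPv4 fields are filled in. A hedged sketch of why that matters, assuming keys are compared as raw memory (tunnel_keys_equal() is a hypothetical helper, not part of the patch):

#include <linux/string.h>
#include <linux/types.h>
#include <net/ip_tunnels.h>

/* Hypothetical helper: comparing keys with memcmp() over IP_TUNNEL_KEY_SIZE
 * only gives stable results if the bytes of the union that were not
 * explicitly set (the IPv6 part, when only IPv4 addresses were written)
 * are always zero. */
static bool tunnel_keys_equal(const struct ip_tunnel_key *a,
                              const struct ip_tunnel_key *b)
{
    return !memcmp(a, b, IP_TUNNEL_KEY_SIZE);
}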
......
@@ -87,9 +87,7 @@ int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
 struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
 int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
 int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
-int lwtunnel_output6(struct sock *sk, struct sk_buff *skb);
 int lwtunnel_input(struct sk_buff *skb);
-int lwtunnel_input6(struct sk_buff *skb);

 #else
@@ -164,21 +162,11 @@ static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
     return -EOPNOTSUPP;
 }

-static inline int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
-{
-    return -EOPNOTSUPP;
-}
-
 static inline int lwtunnel_input(struct sk_buff *skb)
 {
     return -EOPNOTSUPP;
 }

-static inline int lwtunnel_input6(struct sk_buff *skb)
-{
-    return -EOPNOTSUPP;
-}
-
 #endif

 #endif /* __NET_LWTUNNEL_H */
@@ -182,7 +182,8 @@ int ndisc_rcv(struct sk_buff *skb);

 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                    const struct in6_addr *solicit,
-                   const struct in6_addr *daddr, const struct in6_addr *saddr);
+                   const struct in6_addr *daddr, const struct in6_addr *saddr,
+                   struct sk_buff *oskb);

 void ndisc_send_rs(struct net_device *dev,
                    const struct in6_addr *saddr, const struct in6_addr *daddr);
......
@@ -66,7 +66,6 @@ struct rtable {

     struct list_head    rt_uncached;
     struct uncached_list    *rt_uncached_list;
-    struct lwtunnel_state   *rt_lwtstate;
 };

 static inline bool rt_is_input_route(const struct rtable *rt)
......
@@ -241,3 +241,8 @@ static inline void vxlan_get_rx_port(struct net_device *netdev)
 }
 #endif
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+    return vs->sock->sk->sk_family;
+}
 #endif
@@ -8,6 +8,7 @@ enum lwtunnel_encap_types {
     LWTUNNEL_ENCAP_MPLS,
     LWTUNNEL_ENCAP_IP,
     LWTUNNEL_ENCAP_ILA,
+    LWTUNNEL_ENCAP_IP6,
     __LWTUNNEL_ENCAP_MAX,
 };
@@ -28,4 +29,19 @@ enum lwtunnel_ip_t {

 #define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)

+enum lwtunnel_ip6_t {
+    LWTUNNEL_IP6_UNSPEC,
+    LWTUNNEL_IP6_ID,
+    LWTUNNEL_IP6_DST,
+    LWTUNNEL_IP6_SRC,
+    LWTUNNEL_IP6_HOPLIMIT,
+    LWTUNNEL_IP6_TC,
+    LWTUNNEL_IP6_SPORT,
+    LWTUNNEL_IP6_DPORT,
+    LWTUNNEL_IP6_FLAGS,
+    __LWTUNNEL_IP6_MAX,
+};
+
+#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
+
 #endif /* _UAPI_LWTUNNEL_H_ */
@@ -20,6 +20,7 @@
 #include <net/net_namespace.h>
 #include <linux/sched.h>
 #include <linux/prefetch.h>
+#include <net/lwtunnel.h>
 #include <net/dst.h>
 #include <net/dst_metadata.h>
@@ -184,6 +185,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 #ifdef CONFIG_IP_ROUTE_CLASSID
     dst->tclassid = 0;
 #endif
+    dst->lwtstate = NULL;
     atomic_set(&dst->__refcnt, initial_ref);
     dst->__use = 0;
     dst->lastuse = jiffies;
@@ -264,6 +266,7 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
         kfree(dst);
     else
         kmem_cache_free(dst->ops->kmem_cachep, dst);
+    lwtstate_put(dst->lwtstate);

     dst = child;
     if (dst) {
......
@@ -1489,13 +1489,13 @@ static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
 {
     struct sk_buff *skb = (struct sk_buff *) (long) r1;
     struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
-    struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);
+    struct ip_tunnel_info *info = skb_tunnel_info(skb);

     if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
         return -EINVAL;

     to->tunnel_id = be64_to_cpu(info->key.tun_id);
-    to->remote_ipv4 = be32_to_cpu(info->key.ipv4_src);
+    to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);

     return 0;
 }
@@ -1529,7 +1529,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
     info = &md->u.tun_info;
     info->mode = IP_TUNNEL_INFO_TX;
     info->key.tun_id = cpu_to_be64(from->tunnel_id);
-    info->key.ipv4_dst = cpu_to_be32(from->remote_ipv4);
+    info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);

     return 0;
 }
......
@@ -179,14 +179,16 @@ int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
 }
 EXPORT_SYMBOL(lwtunnel_cmp_encap);

-int __lwtunnel_output(struct sock *sk, struct sk_buff *skb,
-                      struct lwtunnel_state *lwtstate)
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
 {
+    struct dst_entry *dst = skb_dst(skb);
     const struct lwtunnel_encap_ops *ops;
+    struct lwtunnel_state *lwtstate;
     int ret = -EINVAL;

-    if (!lwtstate)
+    if (!dst)
         goto drop;
+    lwtstate = dst->lwtstate;

     if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
         lwtstate->type > LWTUNNEL_ENCAP_MAX)
@@ -209,47 +211,18 @@ int __lwtunnel_output(struct sock *sk, struct sk_buff *skb,

     return ret;
 }
-
-int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
-{
-    struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
-    struct lwtunnel_state *lwtstate = NULL;
-
-    if (rt) {
-        lwtstate = rt->rt6i_lwtstate;
-        skb->dev = rt->dst.dev;
-    }
-
-    skb->protocol = htons(ETH_P_IPV6);
-
-    return __lwtunnel_output(sk, skb, lwtstate);
-}
-EXPORT_SYMBOL(lwtunnel_output6);
-
-int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
-{
-    struct rtable *rt = (struct rtable *)skb_dst(skb);
-    struct lwtunnel_state *lwtstate = NULL;
-
-    if (rt) {
-        lwtstate = rt->rt_lwtstate;
-        skb->dev = rt->dst.dev;
-    }
-
-    skb->protocol = htons(ETH_P_IP);
-
-    return __lwtunnel_output(sk, skb, lwtstate);
-}
 EXPORT_SYMBOL(lwtunnel_output);

-int __lwtunnel_input(struct sk_buff *skb,
-                     struct lwtunnel_state *lwtstate)
+int lwtunnel_input(struct sk_buff *skb)
 {
+    struct dst_entry *dst = skb_dst(skb);
     const struct lwtunnel_encap_ops *ops;
+    struct lwtunnel_state *lwtstate;
     int ret = -EINVAL;

-    if (!lwtstate)
+    if (!dst)
         goto drop;
+    lwtstate = dst->lwtstate;

     if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
         lwtstate->type > LWTUNNEL_ENCAP_MAX)
@@ -272,27 +245,4 @@ int __lwtunnel_input(struct sk_buff *skb,

     return ret;
 }
-
-int lwtunnel_input6(struct sk_buff *skb)
-{
-    struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
-    struct lwtunnel_state *lwtstate = NULL;
-
-    if (rt)
-        lwtstate = rt->rt6i_lwtstate;
-
-    return __lwtunnel_input(skb, lwtstate);
-}
-EXPORT_SYMBOL(lwtunnel_input6);
-
-int lwtunnel_input(struct sk_buff *skb)
-{
-    struct rtable *rt = (struct rtable *)skb_dst(skb);
-    struct lwtunnel_state *lwtstate = NULL;
-
-    if (rt)
-        lwtstate = rt->rt_lwtstate;
-
-    return __lwtunnel_input(skb, lwtstate);
-}
 EXPORT_SYMBOL(lwtunnel_input);
@@ -407,10 +407,10 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
             return PACKET_REJECT;

         info = &tun_dst->u.tun_info;
-        info->key.ipv4_src = iph->saddr;
-        info->key.ipv4_dst = iph->daddr;
-        info->key.ipv4_tos = iph->tos;
-        info->key.ipv4_ttl = iph->ttl;
+        info->key.u.ipv4.src = iph->saddr;
+        info->key.u.ipv4.dst = iph->daddr;
+        info->key.tos = iph->tos;
+        info->key.ttl = iph->ttl;

         info->mode = IP_TUNNEL_INFO_RX;
         info->key.tun_flags = tpi->flags &
@@ -521,15 +521,15 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
     __be16 df, flags;
     int err;

-    tun_info = skb_tunnel_info(skb, AF_INET);
+    tun_info = skb_tunnel_info(skb);
     if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX))
         goto err_free_skb;

     key = &tun_info->key;
     memset(&fl, 0, sizeof(fl));
-    fl.daddr = key->ipv4_dst;
-    fl.saddr = key->ipv4_src;
-    fl.flowi4_tos = RT_TOS(key->ipv4_tos);
+    fl.daddr = key->u.ipv4.dst;
+    fl.saddr = key->u.ipv4.src;
+    fl.flowi4_tos = RT_TOS(key->tos);
     fl.flowi4_mark = skb->mark;
     fl.flowi4_proto = IPPROTO_GRE;
@@ -564,8 +564,8 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
     df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
     err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
-                        key->ipv4_dst, IPPROTO_GRE,
-                        key->ipv4_tos, key->ipv4_ttl, df, false);
+                        key->u.ipv4.dst, IPPROTO_GRE,
+                        key->tos, key->ttl, df, false);
     iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

     return;
......
@@ -227,16 +227,16 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
         tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);

     if (tb[LWTUNNEL_IP_DST])
-        tun_info->key.ipv4_dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
+        tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);

     if (tb[LWTUNNEL_IP_SRC])
-        tun_info->key.ipv4_src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
+        tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);

     if (tb[LWTUNNEL_IP_TTL])
-        tun_info->key.ipv4_ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
+        tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

     if (tb[LWTUNNEL_IP_TOS])
-        tun_info->key.ipv4_tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
+        tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

     if (tb[LWTUNNEL_IP_SPORT])
         tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
@@ -262,10 +262,10 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
     struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

     if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
-        nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.ipv4_dst) ||
-        nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.ipv4_src) ||
-        nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.ipv4_tos) ||
-        nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ipv4_ttl) ||
+        nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+        nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+        nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
+        nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
         nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
         nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
         nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
@@ -299,9 +299,111 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
     .cmp_encap = ip_tun_cmp_encap,
 };

+static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+    [LWTUNNEL_IP6_ID]       = { .type = NLA_U64 },
+    [LWTUNNEL_IP6_DST]      = { .len = sizeof(struct in6_addr) },
+    [LWTUNNEL_IP6_SRC]      = { .len = sizeof(struct in6_addr) },
+    [LWTUNNEL_IP6_HOPLIMIT]     = { .type = NLA_U8 },
+    [LWTUNNEL_IP6_TC]       = { .type = NLA_U8 },
+    [LWTUNNEL_IP6_SPORT]        = { .type = NLA_U16 },
+    [LWTUNNEL_IP6_DPORT]        = { .type = NLA_U16 },
+    [LWTUNNEL_IP6_FLAGS]        = { .type = NLA_U16 },
+};
+
+static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+                               struct lwtunnel_state **ts)
+{
+    struct ip_tunnel_info *tun_info;
+    struct lwtunnel_state *new_state;
+    struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
+    int err;
+
+    err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
+    if (err < 0)
+        return err;
+
+    new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+    if (!new_state)
+        return -ENOMEM;
+
+    new_state->type = LWTUNNEL_ENCAP_IP6;
+
+    tun_info = lwt_tun_info(new_state);
+
+    if (tb[LWTUNNEL_IP6_ID])
+        tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP6_ID]);
+
+    if (tb[LWTUNNEL_IP6_DST])
+        tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
+
+    if (tb[LWTUNNEL_IP6_SRC])
+        tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
+
+    if (tb[LWTUNNEL_IP6_HOPLIMIT])
+        tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
+
+    if (tb[LWTUNNEL_IP6_TC])
+        tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
+
+    if (tb[LWTUNNEL_IP6_SPORT])
+        tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
+
+    if (tb[LWTUNNEL_IP6_DPORT])
+        tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
+
+    if (tb[LWTUNNEL_IP6_FLAGS])
+        tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
+
+    tun_info->mode = IP_TUNNEL_INFO_TX;
+    tun_info->options = NULL;
+    tun_info->options_len = 0;
+
+    *ts = new_state;
+
+    return 0;
+}
+
+static int ip6_tun_fill_encap_info(struct sk_buff *skb,
+                                   struct lwtunnel_state *lwtstate)
+{
+    struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+    if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+        nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
+        nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
+        nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
+        nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
+        nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
+        nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
+        nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+        return -ENOMEM;
+
+    return 0;
+}
+
+static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+    return nla_total_size(8)    /* LWTUNNEL_IP6_ID */
+        + nla_total_size(16)    /* LWTUNNEL_IP6_DST */
+        + nla_total_size(16)    /* LWTUNNEL_IP6_SRC */
+        + nla_total_size(1)     /* LWTUNNEL_IP6_HOPLIMIT */
+        + nla_total_size(1)     /* LWTUNNEL_IP6_TC */
+        + nla_total_size(2)     /* LWTUNNEL_IP6_SPORT */
+        + nla_total_size(2)     /* LWTUNNEL_IP6_DPORT */
+        + nla_total_size(2);    /* LWTUNNEL_IP6_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+    .build_state = ip6_tun_build_state,
+    .fill_encap = ip6_tun_fill_encap_info,
+    .get_encap_size = ip6_tun_encap_nlsize,
+    .cmp_encap = ip_tun_cmp_encap,
+};
+
 void __init ip_tunnel_core_init(void)
 {
     lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+    lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
 }

 struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
......
@@ -1359,7 +1359,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
         list_del(&rt->rt_uncached);
         spin_unlock_bh(&ul->lock);
     }
-    lwtstate_put(rt->rt_lwtstate);
 }

 void rt_flush_dev(struct net_device *dev)
@@ -1408,7 +1407,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #ifdef CONFIG_IP_ROUTE_CLASSID
         rt->dst.tclassid = nh->nh_tclassid;
 #endif
-        rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
+        rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
         if (unlikely(fnhe))
             cached = rt_bind_exception(rt, fnhe, daddr);
         else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1494,7 +1493,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
     rth->rt_gateway = 0;
     rth->rt_uses_gateway = 0;
     INIT_LIST_HEAD(&rth->rt_uncached);
-    rth->rt_lwtstate = NULL;
     if (our) {
         rth->dst.input= ip_local_deliver;
         rth->rt_flags |= RTCF_LOCAL;
@@ -1624,19 +1622,18 @@ static int __mkroute_input(struct sk_buff *skb,
     rth->rt_gateway = 0;
     rth->rt_uses_gateway = 0;
     INIT_LIST_HEAD(&rth->rt_uncached);
-    rth->rt_lwtstate = NULL;
     RT_CACHE_STAT_INC(in_slow_tot);

     rth->dst.input = ip_forward;
     rth->dst.output = ip_output;

     rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
-    if (lwtunnel_output_redirect(rth->rt_lwtstate)) {
-        rth->rt_lwtstate->orig_output = rth->dst.output;
+    if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+        rth->dst.lwtstate->orig_output = rth->dst.output;
         rth->dst.output = lwtunnel_output;
     }
-    if (lwtunnel_input_redirect(rth->rt_lwtstate)) {
-        rth->rt_lwtstate->orig_input = rth->dst.input;
+    if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
+        rth->dst.lwtstate->orig_input = rth->dst.input;
         rth->dst.input = lwtunnel_input;
     }
     skb_dst_set(skb, &rth->dst);
@@ -1695,7 +1692,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        by fib_lookup.
      */

-    tun_info = skb_tunnel_info(skb, AF_INET);
+    tun_info = skb_tunnel_info(skb);
     if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
         fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
     else
@@ -1815,7 +1812,6 @@ out: return err;
     rth->rt_gateway = 0;
     rth->rt_uses_gateway = 0;
     INIT_LIST_HEAD(&rth->rt_uncached);
-    rth->rt_lwtstate = NULL;
     RT_CACHE_STAT_INC(in_slow_tot);

     if (res.type == RTN_UNREACHABLE) {
@@ -2006,7 +2002,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
     rth->rt_gateway = 0;
     rth->rt_uses_gateway = 0;
     INIT_LIST_HEAD(&rth->rt_uncached);
-    rth->rt_lwtstate = NULL;
     RT_CACHE_STAT_INC(out_slow_tot);

     if (flags & RTCF_LOCAL)
@@ -2029,7 +2024,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
     }

     rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
-    if (lwtunnel_output_redirect(rth->rt_lwtstate))
+    if (lwtunnel_output_redirect(rth->dst.lwtstate))
         rth->dst.output = lwtunnel_output;

     return rth;
@@ -2293,7 +2288,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
         rt->rt_uses_gateway = ort->rt_uses_gateway;

         INIT_LIST_HEAD(&rt->rt_uncached);
-        rt->rt_lwtstate = NULL;

         dst_free(new);
     }
......
@@ -3656,7 +3656,7 @@ static void addrconf_dad_work(struct work_struct *w)

     /* send a neighbour solicitation for our addr */
     addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
-    ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
+    ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
 out:
     in6_ifa_put(ifp);
     rtnl_unlock();
......
@@ -89,16 +89,13 @@ static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
 static int ila_output(struct sock *sk, struct sk_buff *skb)
 {
     struct dst_entry *dst = skb_dst(skb);
-    struct rt6_info *rt6 = NULL;

     if (skb->protocol != htons(ETH_P_IPV6))
         goto drop;

-    rt6 = (struct rt6_info *)dst;
-
-    update_ipv6_locator(skb, ila_params_lwtunnel(rt6->rt6i_lwtstate));
-
-    return rt6->rt6i_lwtstate->orig_output(sk, skb);
+    update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+    return dst->lwtstate->orig_output(sk, skb);

 drop:
     kfree_skb(skb);
@@ -108,16 +105,13 @@ static int ila_output(struct sock *sk, struct sk_buff *skb)
 static int ila_input(struct sk_buff *skb)
 {
     struct dst_entry *dst = skb_dst(skb);
-    struct rt6_info *rt6 = NULL;

     if (skb->protocol != htons(ETH_P_IPV6))
         goto drop;

-    rt6 = (struct rt6_info *)dst;
-
-    update_ipv6_locator(skb, ila_params_lwtunnel(rt6->rt6i_lwtstate));
-
-    return rt6->rt6i_lwtstate->orig_input(skb);
+    update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+    return dst->lwtstate->orig_input(skb);

 drop:
     kfree_skb(skb);
......
@@ -178,7 +178,6 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 static void rt6_release(struct rt6_info *rt)
 {
     if (atomic_dec_and_test(&rt->rt6i_ref)) {
-        lwtstate_put(rt->rt6i_lwtstate);
         rt6_free_pcpu(rt);
         dst_free(&rt->dst);
     }
......
@@ -553,7 +553,8 @@ static void ndisc_send_unsol_na(struct net_device *dev)

 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
                    const struct in6_addr *solicit,
-                   const struct in6_addr *daddr, const struct in6_addr *saddr)
+                   const struct in6_addr *daddr, const struct in6_addr *saddr,
+                   struct sk_buff *oskb)
 {
     struct sk_buff *skb;
     struct in6_addr addr_buf;
@@ -589,6 +590,9 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
         ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
                                dev->dev_addr);

+    if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
+        skb_dst_copy(skb, oskb);
+
     ndisc_send_skb(skb, daddr, saddr);
 }
@@ -675,12 +679,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
                   "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
                   __func__, target);
         }
-        ndisc_send_ns(dev, neigh, target, target, saddr);
+        ndisc_send_ns(dev, neigh, target, target, saddr, skb);
     } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
         neigh_app_ns(neigh);
     } else {
         addrconf_addr_solict_mult(target, &mcaddr);
-        ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
+        ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
     }
 }
......
@@ -54,11 +54,13 @@
 #include <net/tcp.h>
 #include <linux/rtnetlink.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/xfrm.h>
 #include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
 #include <net/lwtunnel.h>
+#include <net/ip_tunnels.h>

 #include <asm/uaccess.h>
@@ -538,7 +540,7 @@ static void rt6_probe_deferred(struct work_struct *w)
         container_of(w, struct __rt6_probe_work, work);

     addrconf_addr_solict_mult(&work->target, &mcaddr);
-    ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+    ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
     dev_put(work->dev);
     kfree(work);
 }
@@ -1131,6 +1133,7 @@ void ip6_route_input(struct sk_buff *skb)
     const struct ipv6hdr *iph = ipv6_hdr(skb);
     struct net *net = dev_net(skb->dev);
     int flags = RT6_LOOKUP_F_HAS_SADDR;
+    struct ip_tunnel_info *tun_info;
     struct flowi6 fl6 = {
         .flowi6_iif = skb->dev->ifindex,
         .daddr = iph->daddr,
@@ -1140,6 +1143,10 @@ void ip6_route_input(struct sk_buff *skb)
         .flowi6_proto = iph->nexthdr,
     };

+    tun_info = skb_tunnel_info(skb);
+    if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
+        fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
+    skb_dst_drop(skb);
     skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
 }
@@ -1784,14 +1791,14 @@ int ip6_route_add(struct fib6_config *cfg)
                        cfg->fc_encap, &lwtstate);
         if (err)
             goto out;
-        rt->rt6i_lwtstate = lwtstate_get(lwtstate);
-        if (lwtunnel_output_redirect(rt->rt6i_lwtstate)) {
-            rt->rt6i_lwtstate->orig_output = rt->dst.output;
-            rt->dst.output = lwtunnel_output6;
+        rt->dst.lwtstate = lwtstate_get(lwtstate);
+        if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
+            rt->dst.lwtstate->orig_output = rt->dst.output;
+            rt->dst.output = lwtunnel_output;
         }
-        if (lwtunnel_input_redirect(rt->rt6i_lwtstate)) {
-            rt->rt6i_lwtstate->orig_input = rt->dst.input;
-            rt->dst.input = lwtunnel_input6;
+        if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
+            rt->dst.lwtstate->orig_input = rt->dst.input;
+            rt->dst.input = lwtunnel_input;
         }
     }
@@ -2174,7 +2181,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
 #endif
     rt->rt6i_prefsrc = ort->rt6i_prefsrc;
     rt->rt6i_table = ort->rt6i_table;
-    rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate);
+    rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
 }

 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2838,7 +2845,7 @@ static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
            + nla_total_size(sizeof(struct rta_cacheinfo))
            + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
            + nla_total_size(1) /* RTA_PREF */
-           + lwtunnel_get_encap_size(rt->rt6i_lwtstate);
+           + lwtunnel_get_encap_size(rt->dst.lwtstate);
 }

 static int rt6_fill_node(struct net *net,
@@ -2991,7 +2998,7 @@ static int rt6_fill_node(struct net *net,
     if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
         goto nla_put_failure;

-    lwtunnel_fill_encap(skb, rt->rt6i_lwtstate);
+    lwtunnel_fill_encap(skb, rt->dst.lwtstate);

     nlmsg_end(skb, nlh);
     return 0;
......
@@ -48,7 +48,6 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
     struct dst_entry *dst = skb_dst(skb);
     struct rtable *rt = NULL;
     struct rt6_info *rt6 = NULL;
-    struct lwtunnel_state *lwtstate = NULL;
     int err = 0;
     bool bos;
     int i;
@@ -58,11 +57,9 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
     if (skb->protocol == htons(ETH_P_IP)) {
         ttl = ip_hdr(skb)->ttl;
         rt = (struct rtable *)dst;
-        lwtstate = rt->rt_lwtstate;
     } else if (skb->protocol == htons(ETH_P_IPV6)) {
         ttl = ipv6_hdr(skb)->hop_limit;
         rt6 = (struct rt6_info *)dst;
-        lwtstate = rt6->rt6i_lwtstate;
     } else {
         goto drop;
     }
@@ -72,12 +69,12 @@ int mpls_output(struct sock *sk, struct sk_buff *skb)
     /* Find the output device */
     out_dev = dst->dev;
     if (!mpls_output_possible(out_dev) ||
-        !lwtstate || skb_warn_if_lro(skb))
+        !dst->lwtstate || skb_warn_if_lro(skb))
         goto drop;

     skb_forward_csum(skb);

-    tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+    tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);

     /* Verify the destination can hold the packet */
     new_header_size = mpls_encap_size(tun_encap_info);
......
@@ -534,19 +534,19 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
             tun_flags |= TUNNEL_KEY;
             break;
         case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
-            SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+            SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
                             nla_get_in_addr(a), is_mask);
             break;
         case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
-            SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+            SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
                             nla_get_in_addr(a), is_mask);
             break;
         case OVS_TUNNEL_KEY_ATTR_TOS:
-            SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+            SW_FLOW_KEY_PUT(match, tun_key.tos,
                             nla_get_u8(a), is_mask);
             break;
         case OVS_TUNNEL_KEY_ATTR_TTL:
-            SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+            SW_FLOW_KEY_PUT(match, tun_key.ttl,
                             nla_get_u8(a), is_mask);
             ttl = true;
             break;
@@ -609,7 +609,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
     }

     if (!is_mask) {
-        if (!match->key->tun_key.ipv4_dst) {
+        if (!match->key->tun_key.u.ipv4.dst) {
             OVS_NLERR(log, "IPv4 tunnel dst address is zero");
             return -EINVAL;
         }
@@ -647,18 +647,18 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
     if (output->tun_flags & TUNNEL_KEY &&
         nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
         return -EMSGSIZE;
-    if (output->ipv4_src &&
+    if (output->u.ipv4.src &&
         nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
-                        output->ipv4_src))
+                        output->u.ipv4.src))
         return -EMSGSIZE;
-    if (output->ipv4_dst &&
+    if (output->u.ipv4.dst &&
         nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
-                        output->ipv4_dst))
+                        output->u.ipv4.dst))
         return -EMSGSIZE;
-    if (output->ipv4_tos &&
-        nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+    if (output->tos &&
+        nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
         return -EMSGSIZE;
-    if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+    if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
         return -EMSGSIZE;
     if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
         nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
@@ -1116,7 +1116,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
         /* The userspace does not send tunnel attributes that
          * are 0, but we should not wildcard them nonetheless.
          */
-        if (match->key->tun_key.ipv4_dst)
+        if (match->key->tun_key.u.ipv4.dst)
             SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
                                      0xff, true);
@@ -1287,7 +1287,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
     if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
         goto nla_put_failure;

-    if ((swkey->tun_key.ipv4_dst || is_mask)) {
+    if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
         const void *opts = NULL;

         if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
......
@@ -426,7 +426,7 @@ static u32 flow_hash(const struct sw_flow_key *key,
 static int flow_key_start(const struct sw_flow_key *key)
 {
-    if (key->tun_key.ipv4_dst)
+    if (key->tun_key.u.ipv4.dst)
         return 0;
     else
         return rounddown(offsetof(struct sw_flow_key, phy),
......
@@ -203,8 +203,8 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
     }

     err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
-                          tun_key->ipv4_dst, tun_key->ipv4_tos,
-                          tun_key->ipv4_ttl, df, sport, dport,
+                          tun_key->u.ipv4.dst, tun_key->tos,
+                          tun_key->ttl, df, sport, dport,
                           tun_key->tun_flags, vni, opts_len, opts,
                           !!(tun_key->tun_flags & TUNNEL_CSUM), false);
     if (err < 0)
......
@@ -57,7 +57,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)

     skb_push(skb, ETH_HLEN);
     ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
-    ovs_vport_receive(vport, skb, skb_tunnel_info(skb, AF_INET));
+    ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
     return;
 error:
......
@@ -603,9 +603,9 @@ int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
      * saddr, tp_src and tp_dst
      */
     __ip_tunnel_info_init(egress_tun_info,
-                          fl.saddr, tun_key->ipv4_dst,
-                          tun_key->ipv4_tos,
-                          tun_key->ipv4_ttl,
+                          fl.saddr, tun_key->u.ipv4.dst,
+                          tun_key->tos,
+                          tun_key->ttl,
                           tp_src, tp_dst,
                           tun_key->tun_id,
                           tun_key->tun_flags,
......
@@ -254,9 +254,9 @@ static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
     struct rtable *rt;

     memset(fl, 0, sizeof(*fl));
-    fl->daddr = key->ipv4_dst;
-    fl->saddr = key->ipv4_src;
-    fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+    fl->daddr = key->u.ipv4.dst;
+    fl->saddr = key->u.ipv4.src;
+    fl->flowi4_tos = RT_TOS(key->tos);
     fl->flowi4_mark = mark;
     fl->flowi4_proto = protocol;
......