Commit d5005140 authored by Steffen Klassert, committed by David S. Miller

ipv6: Allow sending packets through tunnels with wildcard endpoints

Currently we need the IP6_TNL_F_CAP_XMIT capability to transmit
packets through an ipv6 tunnel. This capability is set when the
tunnel is configured, based on the tunnel endpoint addresses.

On tunnels with wildcard tunnel endpoints, we need to do the
capability checking on a per-packet basis, as is done in
the receive path.

This patch extends ip6_tnl_xmit_ctl() to take local and remote
addresses as parameters to allow per-packet capability
checking.
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e1b2cb65
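For illustration only (not part of the commit): a minimal userspace sketch of the idea behind the patch, using hypothetical names. A tunnel whose local or remote endpoint is the wildcard address (::) cannot be granted a static transmit capability when it is configured, so the decision has to be repeated against each packet's addresses.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Hypothetical capability flags, mirroring the spirit of
 * IP6_TNL_F_CAP_XMIT and IP6_TNL_F_CAP_PER_PACKET. */
#define CAP_XMIT       0x1
#define CAP_PER_PACKET 0x2

static int is_wildcard(const struct in6_addr *a)
{
        static const struct in6_addr any = IN6ADDR_ANY_INIT;

        return memcmp(a, &any, sizeof(any)) == 0;
}

/* Classify a (local, remote) endpoint pair: wildcard endpoints can only
 * be validated per packet, concrete endpoints allow a static decision. */
static unsigned int classify(const struct in6_addr *laddr,
                             const struct in6_addr *raddr)
{
        if (is_wildcard(laddr) || is_wildcard(raddr))
                return CAP_PER_PACKET;

        return CAP_XMIT;
}

int main(void)
{
        struct in6_addr any = IN6ADDR_ANY_INIT;
        struct in6_addr local, remote;

        inet_pton(AF_INET6, "2001:db8::1", &local);
        inet_pton(AF_INET6, "2001:db8::2", &remote);

        /* A fully specified tunnel gets a static xmit capability ... */
        printf("configured endpoints: %#x\n", classify(&local, &remote));
        /* ... a wildcard tunnel must be re-checked for every packet. */
        printf("wildcard endpoint:    %#x\n", classify(&any, &remote));

        return 0;
}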
@@ -65,7 +65,8 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t);
 void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst);
 int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
                     const struct in6_addr *raddr);
-int ip6_tnl_xmit_ctl(struct ip6_tnl *t);
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
+                     const struct in6_addr *raddr);
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
                       const struct in6_addr *raddr);
@@ -902,7 +902,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
         struct net_device_stats *stats = &t->dev->stats;
         int ret;
 
-        if (!ip6_tnl_xmit_ctl(t))
+        if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
                 goto tx_err;
 
         switch (skb->protocol) {
@@ -909,24 +909,28 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
         return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
 
-int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
+int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
+                     const struct in6_addr *laddr,
+                     const struct in6_addr *raddr)
 {
         struct __ip6_tnl_parm *p = &t->parms;
         int ret = 0;
         struct net *net = t->net;
 
-        if (p->flags & IP6_TNL_F_CAP_XMIT) {
+        if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
+           ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
+            (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
                 struct net_device *ldev = NULL;
 
                 rcu_read_lock();
                 if (p->link)
                         ldev = dev_get_by_index_rcu(net, p->link);
 
-                if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
+                if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
                         pr_warn("%s xmit: Local address not yet configured!\n",
                                 p->name);
-                else if (!ipv6_addr_is_multicast(&p->raddr) &&
-                         unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
+                else if (!ipv6_addr_is_multicast(raddr) &&
+                         unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
                         pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
                                 p->name);
                 else
@@ -977,6 +981,10 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
         if (!fl6->flowi6_mark)
                 dst = ip6_tnl_dst_check(t);
 
+        if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
+                goto tx_err_link_failure;
+
         if (!dst) {
                 ndst = ip6_route_output(net, NULL, fl6);
@@ -1086,8 +1094,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         int err;
 
         tproto = ACCESS_ONCE(t->parms.proto);
-        if ((tproto != IPPROTO_IPIP && tproto != 0) ||
-            !ip6_tnl_xmit_ctl(t))
+        if (tproto != IPPROTO_IPIP && tproto != 0)
                 return -1;
 
         if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
@@ -1131,7 +1138,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         tproto = ACCESS_ONCE(t->parms.proto);
         if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
-            !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
+            ip6_tnl_addr_conflict(t, ipv6h))
                 return -1;
 
         offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
@@ -416,6 +416,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
         struct net_device_stats *stats = &t->dev->stats;
         struct dst_entry *dst = skb_dst(skb);
         struct net_device *tdev;
+        struct xfrm_state *x;
         int err = -1;
 
         if (!dst)
@@ -429,7 +430,12 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                 goto tx_err_link_failure;
         }
 
-        if (!vti6_state_check(dst->xfrm, &t->parms.raddr, &t->parms.laddr))
+        x = dst->xfrm;
+        if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
+                goto tx_err_link_failure;
+
+        if (!ip6_tnl_xmit_ctl(t, (const struct in6_addr *)&x->props.saddr,
+                              (const struct in6_addr *)&x->id.daddr))
                 goto tx_err_link_failure;
 
         tdev = dst->dev;
@@ -484,7 +490,7 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
         ipv6h = ipv6_hdr(skb);
         if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
-            !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
+            vti6_addr_conflict(t, ipv6h))
                 goto tx_err;
 
         xfrm_decode_session(skb, &fl, AF_INET6);
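Taken together, the transmit check now has two ways to succeed: the static IP6_TNL_F_CAP_XMIT capability from configuration time, or a per-packet re-classification for tunnels flagged IP6_TNL_F_CAP_PER_PACKET. The callers supply the per-packet addresses from different sources: ip6gre_tunnel_xmit() still passes the configured t->parms.laddr/raddr, ip6_tnl_xmit2() passes the flow's fl6->saddr/daddr, and vti6_xmit() passes the addresses of the attached xfrm state. Below is a standalone sketch of just that decision, with hypothetical helpers and flags; it models the shape of the condition, not the in-tree code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flags, loosely modelled on IP6_TNL_F_CAP_XMIT and
 * IP6_TNL_F_CAP_PER_PACKET. */
#define CAP_XMIT       0x1
#define CAP_PER_PACKET 0x2

struct tunnel {
        unsigned int flags;     /* set once when the tunnel is configured */
};

/* Stand-in for a per-packet ip6_tnl_get_cap()-style classification of
 * the addresses the caller passed in for this particular packet. */
static unsigned int per_packet_cap(bool packet_addrs_usable)
{
        return packet_addrs_usable ? CAP_XMIT : 0;
}

/* Shape of the extended condition in ip6_tnl_xmit_ctl(): transmit is
 * allowed if the static capability is present, or if the tunnel is a
 * per-packet (wildcard) tunnel and this packet's addresses qualify. */
static bool may_xmit(const struct tunnel *t, bool packet_addrs_usable)
{
        return (t->flags & CAP_XMIT) ||
               ((t->flags & CAP_PER_PACKET) &&
                (per_packet_cap(packet_addrs_usable) & CAP_XMIT));
}

int main(void)
{
        struct tunnel fixed    = { .flags = CAP_XMIT };
        struct tunnel wildcard = { .flags = CAP_PER_PACKET };

        printf("fixed tunnel, any packet:        %d\n", may_xmit(&fixed, true));
        printf("wildcard tunnel, good addresses: %d\n", may_xmit(&wildcard, true));
        printf("wildcard tunnel, bad addresses:  %d\n", may_xmit(&wildcard, false));

        return 0;
}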