Commit d1ac3b16 authored by David S. Miller

Merge branch 'ipv6-tunnel-cleanups'

Tom Herbert says:

====================
net: Cleanup IPv6 ip tunnels

The IPv6 tunnel code is very different from the IPv4 code, and there is
a lot of redundancy between the two, particularly in GRE tunneling.

This patch set cleans up the tunnel code to make the IPv6 code look
more like the IPv4 code and use common functions between the two
stacks where possible.

This work should make it easier to maintain and extend the IPv6 ip
tunnels.

Items in this patch set:
  - Clean up the IPv6 tunnel receive path (ip6_tnl_rcv). This includes
    using gro_cells and exporting ip6_tnl_rcv so that ip6_gre can call it
  - Move GRE functions to common header file (tx functions) or
    gre_demux.c (rx functions like gre_parse_header)
  - Call common GRE functions from IPv6 GRE
  - Create ip6_tnl_xmit (to be like ip_tunnel_xmit)

Tested:
  Ran super_netperf tests for TCP_RR and TCP_STREAM for:
    - IPv4 over gre, gretap, gre6, gre6tap
    - IPv6 over gre, gretap, gre6, gre6tap
    - ipip
    - ip6ip6
    - ipip/gue
    - IPv6 over gre/gue
    - IPv4 over gre/gue
====================
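For orientation, here is a minimal sketch (not part of the commit) of the receive path the series converges on. It mirrors the new gre_rcv()/ip6gre_rcv() in the ip6_gre.c hunks below, with the tunnel lookup inlined and the ICMP error path trimmed:

/* Sketch of the post-series IPv6 GRE receive flow; mirrors
 * gre_rcv()/ip6gre_rcv() from the ip6_gre.c hunks below.
 */
static int gre6_rcv_sketch(struct sk_buff *skb)
{
    const struct ipv6hdr *ipv6h;
    struct tnl_ptk_info tpi;
    struct ip6_tnl *tunnel;
    bool csum_err = false;
    int hdr_len;

    /* 1. The shared IPv4/IPv6 parser fills tpi (flags/key/seq/proto). */
    if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0)
        goto drop;

    /* 2. The parser no longer pulls the header; the caller does. */
    if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
        goto drop;

    /* 3. Per-family lookup, then the now-exported common receive path,
     * which ends in gro_cells_receive() instead of netif_rx().
     */
    ipv6h = ipv6_hdr(skb);
    tunnel = ip6gre_tunnel_lookup(skb->dev, &ipv6h->saddr, &ipv6h->daddr,
                                  tpi.key, tpi.proto);
    if (tunnel)
        return ip6_tnl_rcv(tunnel, skb, &tpi, NULL, false);
drop:
    kfree_skb(skb);
    return 0;
}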
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 570d6320 b05229f4
@@ -25,4 +25,108 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
                                        u8 name_assign_type);
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, int *hdr_len);
static inline int gre_calc_hlen(__be16 o_flags)
{
int addend = 4;
if (o_flags & TUNNEL_CSUM)
addend += 4;
if (o_flags & TUNNEL_KEY)
addend += 4;
if (o_flags & TUNNEL_SEQ)
addend += 4;
return addend;
}
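For example, with o_flags = TUNNEL_CSUM | TUNNEL_KEY the helper returns 4 + 4 + 4 = 12: the 4-byte base header plus one 4-byte option word for the checksum (checksum plus reserved half-word) and one for the key.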
static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
{
__be16 tflags = 0;
if (flags & GRE_CSUM)
tflags |= TUNNEL_CSUM;
if (flags & GRE_ROUTING)
tflags |= TUNNEL_ROUTING;
if (flags & GRE_KEY)
tflags |= TUNNEL_KEY;
if (flags & GRE_SEQ)
tflags |= TUNNEL_SEQ;
if (flags & GRE_STRICT)
tflags |= TUNNEL_STRICT;
if (flags & GRE_REC)
tflags |= TUNNEL_REC;
if (flags & GRE_VERSION)
tflags |= TUNNEL_VERSION;
return tflags;
}
static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
{
__be16 flags = 0;
if (tflags & TUNNEL_CSUM)
flags |= GRE_CSUM;
if (tflags & TUNNEL_ROUTING)
flags |= GRE_ROUTING;
if (tflags & TUNNEL_KEY)
flags |= GRE_KEY;
if (tflags & TUNNEL_SEQ)
flags |= GRE_SEQ;
if (tflags & TUNNEL_STRICT)
flags |= GRE_STRICT;
if (tflags & TUNNEL_REC)
flags |= GRE_REC;
if (tflags & TUNNEL_VERSION)
flags |= GRE_VERSION;
return flags;
}
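These two helpers translate between the on-wire GRE flag bits (GRE_*, stored big-endian in the first 16 bits of the header) and the stack-internal TUNNEL_* flags used by struct tnl_ptk_info and the ip_tunnel code; hoisting them into this header lets ip_gre.c and ip6_gre.c share a single copy instead of the near-identical statics each used to carry.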
static inline __sum16 gre_checksum(struct sk_buff *skb)
{
__wsum csum;
if (skb->ip_summed == CHECKSUM_PARTIAL)
csum = lco_csum(skb);
else
csum = skb_checksum(skb, 0, skb->len, 0);
return csum_fold(csum);
}
static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
__be16 flags, __be16 proto,
__be32 key, __be32 seq)
{
struct gre_base_hdr *greh;
skb_push(skb, hdr_len);
skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)skb->data;
greh->flags = gre_tnl_flags_to_gre_flags(flags);
greh->protocol = proto;
if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
if (flags & TUNNEL_SEQ) {
*ptr = seq;
ptr--;
}
if (flags & TUNNEL_KEY) {
*ptr = key;
ptr--;
}
if (flags & TUNNEL_CSUM &&
!(skb_shinfo(skb)->gso_type &
(SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
*ptr = 0;
*(__sum16 *)ptr = gre_checksum(skb);
}
}
}
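A point that is easy to misread above: gre_build_header() fills the option words from the end of the header backwards (seq, then key, then csum), which produces the forward on-wire order base | csum | key | seq required by RFC 2890. A small standalone demo of that layout (userspace C, with illustrative flag values rather than the kernel's TUNNEL_* bits):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define T_CSUM 0x1  /* illustrative values, not the kernel's TUNNEL_* bits */
#define T_KEY  0x2
#define T_SEQ  0x4

static int calc_hlen(int flags)
{
    int len = 4;                /* base header */
    if (flags & T_CSUM) len += 4;
    if (flags & T_KEY)  len += 4;
    if (flags & T_SEQ)  len += 4;
    return len;
}

int main(void)
{
    int flags = T_CSUM | T_KEY | T_SEQ;
    int hlen = calc_hlen(flags);                   /* 16 */
    uint8_t hdr[16] = {0};
    uint32_t *ptr = (uint32_t *)(hdr + hlen - 4);  /* last option word */

    if (flags & T_SEQ) { *ptr = htonl(1000); ptr--; }    /* seq: last on wire */
    if (flags & T_KEY) { *ptr = htonl(0xabcd); ptr--; }  /* key: before seq */
    if (flags & T_CSUM) { *ptr = 0; }    /* csum word: right after base */

    for (int i = 0; i < hlen; i++)       /* dump: base, csum, key, seq */
        printf("%02x%c", hdr[i], (i % 4 == 3) ? '\n' : ' ');
    return 0;
}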
#endif
@@ -42,6 +42,7 @@ struct ip6_tnl {
    struct __ip6_tnl_parm parms;    /* tunnel configuration parameters */
    struct flowi fl;    /* flowi template for xmit */
    struct dst_cache dst_cache;    /* cached dst */
struct gro_cells gro_cells;
    int err_count;
    unsigned long err_time;
@@ -49,8 +50,10 @@ struct ip6_tnl {
    /* These fields used only by GRE */
    __u32 i_seqno;    /* The last seen seqno    */
    __u32 o_seqno;    /* The last output seqno */
    int hlen;        /* tun_hlen + encap_hlen */
int tun_hlen; /* Precalculated header length */
    int mlink;
};

/* Tunnel encapsulation limit destination sub-option */
@@ -63,13 +66,19 @@ struct ipv6_tlv_tnl_enc_lim {
int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
            const struct in6_addr *raddr);
int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error);
int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
             const struct in6_addr *raddr);
int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
              const struct in6_addr *raddr);
struct net *ip6_tnl_get_link_net(const struct net_device *dev);
int ip6_tnl_get_iflink(const struct net_device *dev);
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_INET
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
...
@@ -60,6 +60,70 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, int *ret_hdr_len)
{
const struct gre_base_hdr *greh;
__be32 *options;
int hdr_len;
if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
return -EINVAL;
greh = (struct gre_base_hdr *)skb_transport_header(skb);
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags);
hdr_len = gre_calc_hlen(tpi->flags);
if (!pskb_may_pull(skb, hdr_len))
return -EINVAL;
greh = (struct gre_base_hdr *)skb_transport_header(skb);
tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1);
if (greh->flags & GRE_CSUM) {
if (skb_checksum_simple_validate(skb)) {
*csum_err = true;
return -EINVAL;
}
skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
null_compute_pseudo);
options++;
}
if (greh->flags & GRE_KEY) {
tpi->key = *options;
options++;
} else {
tpi->key = 0;
}
if (unlikely(greh->flags & GRE_SEQ)) {
tpi->seq = *options;
options++;
} else {
tpi->seq = 0;
}
/* WCCP version 1 and 2 protocol decoding.
* - Change protocol to IP
* - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
*/
if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
tpi->proto = htons(ETH_P_IP);
if ((*(u8 *)options & 0xF0) != 0x40) {
hdr_len += 4;
if (!pskb_may_pull(skb, hdr_len))
return -EINVAL;
}
}
*ret_hdr_len = hdr_len;
return 0;
}
EXPORT_SYMBOL(gre_parse_header);
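Note the contract here: on success gre_parse_header() fills in tpi and *ret_hdr_len but deliberately leaves the GRE header in place, whereas the old IPv4-private parse_gre_header() finished by pulling it. Each caller is now responsible for calling iptunnel_pull_header() itself, as both gre_err() and gre_rcv() do in the ip_gre.c hunks below; that keeps the parser free of skb-geometry side effects and lets the IPv6 path reuse it unchanged.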
static int gre_rcv(struct sk_buff *skb)
{
    const struct gre_protocol *proto;
...
@@ -122,125 +122,6 @@ static int ipgre_tunnel_init(struct net_device *dev);
static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;
static int ip_gre_calc_hlen(__be16 o_flags)
{
int addend = 4;
if (o_flags & TUNNEL_CSUM)
addend += 4;
if (o_flags & TUNNEL_KEY)
addend += 4;
if (o_flags & TUNNEL_SEQ)
addend += 4;
return addend;
}
static __be16 gre_flags_to_tnl_flags(__be16 flags)
{
__be16 tflags = 0;
if (flags & GRE_CSUM)
tflags |= TUNNEL_CSUM;
if (flags & GRE_ROUTING)
tflags |= TUNNEL_ROUTING;
if (flags & GRE_KEY)
tflags |= TUNNEL_KEY;
if (flags & GRE_SEQ)
tflags |= TUNNEL_SEQ;
if (flags & GRE_STRICT)
tflags |= TUNNEL_STRICT;
if (flags & GRE_REC)
tflags |= TUNNEL_REC;
if (flags & GRE_VERSION)
tflags |= TUNNEL_VERSION;
return tflags;
}
static __be16 tnl_flags_to_gre_flags(__be16 tflags)
{
__be16 flags = 0;
if (tflags & TUNNEL_CSUM)
flags |= GRE_CSUM;
if (tflags & TUNNEL_ROUTING)
flags |= GRE_ROUTING;
if (tflags & TUNNEL_KEY)
flags |= GRE_KEY;
if (tflags & TUNNEL_SEQ)
flags |= GRE_SEQ;
if (tflags & TUNNEL_STRICT)
flags |= GRE_STRICT;
if (tflags & TUNNEL_REC)
flags |= GRE_REC;
if (tflags & TUNNEL_VERSION)
flags |= GRE_VERSION;
return flags;
}
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err)
{
const struct gre_base_hdr *greh;
__be32 *options;
int hdr_len;
if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
return -EINVAL;
greh = (struct gre_base_hdr *)skb_transport_header(skb);
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags);
hdr_len = ip_gre_calc_hlen(tpi->flags);
if (!pskb_may_pull(skb, hdr_len))
return -EINVAL;
greh = (struct gre_base_hdr *)skb_transport_header(skb);
tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1);
if (greh->flags & GRE_CSUM) {
if (skb_checksum_simple_validate(skb)) {
*csum_err = true;
return -EINVAL;
}
skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
null_compute_pseudo);
options++;
}
if (greh->flags & GRE_KEY) {
tpi->key = *options;
options++;
} else {
tpi->key = 0;
}
if (unlikely(greh->flags & GRE_SEQ)) {
tpi->seq = *options;
options++;
} else {
tpi->seq = 0;
}
/* WCCP version 1 and 2 protocol decoding.
* - Change protocol to IP
* - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
*/
if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
tpi->proto = htons(ETH_P_IP);
if ((*(u8 *)options & 0xF0) != 0x40) {
hdr_len += 4;
if (!pskb_may_pull(skb, hdr_len))
return -EINVAL;
}
}
return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
}
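This removed block is the code that moved, nearly verbatim, into gre_demux.c as gre_parse_header() above; the one behavioral difference is the final line, where the old parser pulled the header itself while the new one hands hdr_len back to the caller.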
static void ipgre_err(struct sk_buff *skb, u32 info,
              const struct tnl_ptk_info *tpi)
{
@@ -340,12 +221,16 @@ static void gre_err(struct sk_buff *skb, u32 info)
    const int code = icmp_hdr(skb)->code;
    struct tnl_ptk_info tpi;
    bool csum_err = false;
int hdr_len;
    if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len)) {
        if (!csum_err)        /* ignore csum errors. */
            return;
    }
if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
return;
    if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
        ipv4_update_pmtu(skb, dev_net(skb->dev), info,
                 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
@@ -419,6 +304,7 @@ static int gre_rcv(struct sk_buff *skb)
{
    struct tnl_ptk_info tpi;
    bool csum_err = false;
int hdr_len;
#ifdef CONFIG_NET_IPGRE_BROADCAST
    if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -428,7 +314,10 @@ static int gre_rcv(struct sk_buff *skb)
    }
#endif

    if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0)
goto drop;
if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
        goto drop;

    if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
@@ -440,49 +329,6 @@ static int gre_rcv(struct sk_buff *skb)
    return 0;
}
static __sum16 gre_checksum(struct sk_buff *skb)
{
__wsum csum;
if (skb->ip_summed == CHECKSUM_PARTIAL)
csum = lco_csum(skb);
else
csum = skb_checksum(skb, 0, skb->len, 0);
return csum_fold(csum);
}
static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
__be16 proto, __be32 key, __be32 seq)
{
struct gre_base_hdr *greh;
skb_push(skb, hdr_len);
skb_reset_transport_header(skb);
greh = (struct gre_base_hdr *)skb->data;
greh->flags = tnl_flags_to_gre_flags(flags);
greh->protocol = proto;
if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
if (flags & TUNNEL_SEQ) {
*ptr = seq;
ptr--;
}
if (flags & TUNNEL_KEY) {
*ptr = key;
ptr--;
}
if (flags & TUNNEL_CSUM &&
!(skb_shinfo(skb)->gso_type &
(SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
*ptr = 0;
*(__sum16 *)ptr = gre_checksum(skb);
}
}
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
               const struct iphdr *tnl_params,
               __be16 proto)
@@ -493,8 +339,9 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
    tunnel->o_seqno++;

    /* Push GRE header. */
    gre_build_header(skb, tunnel->tun_hlen,
             tunnel->parms.o_flags, proto, tunnel->parms.o_key,
             htonl(tunnel->o_seqno));

    skb_set_inner_protocol(skb, proto);
    ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
@@ -552,7 +399,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
                 fl.saddr);
    }

    tunnel_hlen = gre_calc_hlen(key->tun_flags);

    min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
            + tunnel_hlen + sizeof(struct iphdr);
@@ -571,7 +418,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        goto err_free_rt;

    flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
    gre_build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
             tunnel_id_to_key(tun_info->key.tun_id), 0);

    df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
@@ -694,8 +541,8 @@ static int ipgre_tunnel_ioctl(struct net_device *dev,
        if (err)
            return err;

    p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
    p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

    if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
        return -EFAULT;
@@ -739,7 +586,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
    iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
    greh = (struct gre_base_hdr *)(iph+1);
    greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
    greh->protocol = htons(type);

    memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
@@ -840,7 +687,7 @@ static void __gre_tunnel_init(struct net_device *dev)
    int t_hlen;

    tunnel = netdev_priv(dev);

    tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
    tunnel->parms.iph.protocol = IPPROTO_GRE;

    tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
@@ -1155,8 +1002,10 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
    struct ip_tunnel_parm *p = &t->parms;

    if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
        nla_put_be16(skb, IFLA_GRE_IFLAGS,
             gre_tnl_flags_to_gre_flags(p->i_flags)) ||
        nla_put_be16(skb, IFLA_GRE_OFLAGS,
             gre_tnl_flags_to_gre_flags(p->o_flags)) ||
        nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
        nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
        nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
...
@@ -54,6 +54,7 @@
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>
#include <net/gre.h>
static bool log_ecn_error = true;
@@ -443,137 +444,40 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        t->err_time = jiffies;
}
static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
    const struct ipv6hdr *ipv6h;
    struct ip6_tnl *tunnel;

    ipv6h = ipv6_hdr(skb);
    tunnel = ip6gre_tunnel_lookup(skb->dev,
                      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
                      tpi->proto);
    if (tunnel) {
        ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);

        return PACKET_RCVD;
    }

    return PACKET_REJECT;
}

static int gre_rcv(struct sk_buff *skb)
{
    struct tnl_ptk_info tpi;
    bool csum_err = false;
    int hdr_len;

    if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0)
        goto drop;

    if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
        goto drop;

    if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
        return 0;

    icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
    kfree_skb(skb);
    return 0;
@@ -584,199 +488,40 @@ struct ipv6_tel_txoption {
    __u8 dst_opt[8];
};
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
    return iptunnel_handle_offloads(skb,
                    csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                   struct net_device *dev, __u8 dsfield,
                   struct flowi6 *fl6, int encap_limit,
                   __u32 *pmtu, __be16 proto)
{
    struct ip6_tnl *tunnel = netdev_priv(dev);
    __be16 protocol = (dev->type == ARPHRD_ETHER) ?
              htons(ETH_P_TEB) : proto;

    if (dev->type == ARPHRD_ETHER)
        IPCB(skb)->flags = 0;

    if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
        fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
    else
        fl6->daddr = tunnel->parms.raddr;

    if (tunnel->parms.o_flags & TUNNEL_SEQ)
        tunnel->o_seqno++;

    /* Push GRE header. */
    gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
             protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));

    skb_set_inner_protocol(skb, proto);

    return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
                NEXTHDR_GRE);
}
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
@@ -795,7 +540,6 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
        encap_limit = t->parms.encap_limit;

    memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_GRE;
    dsfield = ipv4_get_dsfield(iph);
@@ -805,7 +549,12 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
    if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
        fl6.flowi6_mark = skb->mark;
    err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
    if (err)
        return -1;

    err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
              skb->protocol);
    if (err != 0) {
        /* XXX: send ICMP error even if DF is not set. */
        if (err == -EMSGSIZE)
@@ -845,7 +594,6 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
        encap_limit = t->parms.encap_limit;

    memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_GRE;
    dsfield = ipv6_get_dsfield(ipv6h);
    if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
@@ -855,7 +603,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
    if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
        fl6.flowi6_mark = skb->mark;
    if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
        return -1;

    err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
              &mtu, skb->protocol);
    if (err != 0) {
        if (err == -EMSGSIZE)
            icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -899,7 +651,11 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
    memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
    fl6.flowi6_proto = skb->protocol;
    err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
    if (err)
        return err;

    err = __gre6_xmit(skb, dev, 0, &fl6, encap_limit, &mtu, skb->protocol);

    return err;
}
@@ -1075,6 +831,8 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
    struct net *net = t->net;
    struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
memset(&p1, 0, sizeof(p1));
    switch (cmd) {
    case SIOCGETTUNNEL:
        if (dev == ign->fb_tunnel_dev) {
@@ -1174,15 +932,6 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
    return err;
}
static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < 68 ||
new_mtu > 0xFFF8 - dev->hard_header_len)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
             unsigned short type,
             const void *daddr, const void *saddr, unsigned int len)
@@ -1226,7 +975,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
    .ndo_uninit = ip6gre_tunnel_uninit,
    .ndo_start_xmit = ip6gre_tunnel_xmit,
    .ndo_do_ioctl = ip6gre_tunnel_ioctl,
    .ndo_change_mtu = ip6_tnl_change_mtu,
    .ndo_get_stats64 = ip_tunnel_get_stats64,
    .ndo_get_iflink = ip6_tnl_get_iflink,
};
@@ -1242,17 +991,11 @@ static void ip6gre_dev_free(struct net_device *dev)
static void ip6gre_tunnel_setup(struct net_device *dev)
{
struct ip6_tnl *t;
    dev->netdev_ops = &ip6gre_netdev_ops;
    dev->destructor = ip6gre_dev_free;

    dev->type = ARPHRD_IP6GRE;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
t = netdev_priv(dev);
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
    dev->flags |= IFF_NOARP;
    dev->addr_len = sizeof(struct in6_addr);
    netif_keep_dst(dev);
@@ -1262,6 +1005,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
{
    struct ip6_tnl *tunnel;
    int ret;
int t_hlen;
    tunnel = netdev_priv(dev);
@@ -1280,6 +1024,16 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
        return ret;
    }
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
dev->mtu = ETH_DATA_LEN - t_hlen - 4;
if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
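A worked example of the arithmetic, assuming a plain gre6 device whose tunnel header ends up as just the 4-byte base GRE header (no key, csum, or seq): t_hlen = 4 + 40 (IPv6 header) = 44, so dev->mtu = 1500 - 44 - 4 = 1452, and 8 bytes less again (1444) when the encapsulation-limit option is carried.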
    return 0;
}
@@ -1318,7 +1072,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
static struct inet6_protocol ip6gre_protocol __read_mostly = {
    .handler     = gre_rcv,
    .err_handler = ip6gre_err,
    .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
@@ -1514,7 +1268,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
    .ndo_start_xmit = ip6gre_tunnel_xmit,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_change_mtu = ip6_tnl_change_mtu,
    .ndo_get_stats64 = ip_tunnel_get_stats64,
    .ndo_get_iflink = ip6_tnl_get_iflink,
};
...
@@ -238,6 +238,7 @@ static void ip6_dev_free(struct net_device *dev)
{
    struct ip6_tnl *t = netdev_priv(dev);
gro_cells_destroy(&t->gro_cells);
    dst_cache_destroy(&t->dst_cache);
    free_percpu(dev->tstats);
    free_netdev(dev);
@@ -753,96 +754,156 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
             const struct tnl_ptk_info *tpi,
             struct metadata_dst *tun_dst,
             int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
                        const struct ipv6hdr *ipv6h,
                        struct sk_buff *skb),
             bool log_ecn_err)
{
    struct pcpu_sw_netstats *tstats;
    const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
    int err;

    if ((!(tpi->flags & TUNNEL_CSUM) &&
         (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
        ((tpi->flags & TUNNEL_CSUM) &&
         !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
        tunnel->dev->stats.rx_crc_errors++;
        tunnel->dev->stats.rx_errors++;
        goto drop;
    }

    if (tunnel->parms.i_flags & TUNNEL_SEQ) {
        if (!(tpi->flags & TUNNEL_SEQ) ||
            (tunnel->i_seqno &&
             (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
            tunnel->dev->stats.rx_fifo_errors++;
            tunnel->dev->stats.rx_errors++;
            goto drop;
        }
        tunnel->i_seqno = ntohl(tpi->seq) + 1;
    }

    skb->protocol = tpi->proto;

    /* Warning: All skb pointers will be invalidated! */
    if (tunnel->dev->type == ARPHRD_ETHER) {
        if (!pskb_may_pull(skb, ETH_HLEN)) {
            tunnel->dev->stats.rx_length_errors++;
            tunnel->dev->stats.rx_errors++;
            goto drop;
        }

        ipv6h = ipv6_hdr(skb);
        skb->protocol = eth_type_trans(skb, tunnel->dev);
        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
    } else {
        skb->dev = tunnel->dev;
    }

    skb_reset_network_header(skb);
    memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

    __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

    err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
    if (unlikely(err)) {
        if (log_ecn_err)
            net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
                         &ipv6h->saddr,
                         ipv6_get_dsfield(ipv6h));
        if (err > 1) {
            ++tunnel->dev->stats.rx_frame_errors;
            ++tunnel->dev->stats.rx_errors;
            goto drop;
        }
    }

    tstats = this_cpu_ptr(tunnel->dev->tstats);
    u64_stats_update_begin(&tstats->syncp);
    tstats->rx_packets++;
    tstats->rx_bytes += skb->len;
    u64_stats_update_end(&tstats->syncp);

    skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

    gro_cells_receive(&tunnel->gro_cells, skb);
    return 0;

drop:
    kfree_skb(skb);
    return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
        const struct tnl_ptk_info *tpi,
        struct metadata_dst *tun_dst,
        bool log_ecn_err)
{
    return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
                 log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
    /* no tunnel info required for ipxip6. */
    .proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
    /* no tunnel info required for ipxip6. */
    .proto = htons(ETH_P_IP),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
              const struct tnl_ptk_info *tpi,
              int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
                          const struct ipv6hdr *ipv6h,
                          struct sk_buff *skb))
{
    struct ip6_tnl *t;
    const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
    int ret = -1;

    rcu_read_lock();
    t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);

    if (t) {
        u8 tproto = ACCESS_ONCE(t->parms.proto);

        if (tproto != ipproto && tproto != 0)
            goto drop;
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
            goto drop;
        if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
            goto drop;
        if (iptunnel_pull_header(skb, 0, tpi->proto, false))
            goto drop;
        ret = __ip6_tnl_rcv(t, skb, tpi, NULL, dscp_ecn_decapsulate,
                    log_ecn_error);
    }

    rcu_read_unlock();

    return ret;

drop:
    rcu_read_unlock();
    kfree_skb(skb);
    return 0;
}
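One inherited detail worth calling out: the (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0 test does serial-number arithmetic, so the in-order check keeps working when the 32-bit counter wraps. A standalone illustration (userspace C):

/* Wraparound-safe "is this sequence number old?" check, as used in
 * __ip6_tnl_rcv() above: subtract in unsigned 32-bit space and look
 * at the sign of the result.
 */
#include <stdint.h>
#include <stdio.h>

static int seq_is_old(uint32_t seq, uint32_t expected)
{
    return (int32_t)(seq - expected) < 0;
}

int main(void)
{
    /* Near wraparound: 0x00000002 is newer than expected 0xfffffffe. */
    printf("%d\n", seq_is_old(0x00000002u, 0xfffffffeu));    /* 0: accept */
    printf("%d\n", seq_is_old(0xfffffffdu, 0xfffffffeu));    /* 1: stale  */
    return 0;
}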
static int ip4ip6_rcv(struct sk_buff *skb)
{
    return ipxip6_rcv(skb, IPPROTO_IP, &tpi_v4,
              ip4ip6_dscp_ecn_decapsulate);
}
static int ip6ip6_rcv(struct sk_buff *skb)
{
    return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
              ip6ip6_dscp_ecn_decapsulate);
}
@@ -918,13 +979,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
/**
 * ip6_tnl_xmit - encapsulate packet and send
 * @skb: the outgoing socket buffer
 * @dev: the outgoing tunnel device
 * @dsfield: dscp code for outer header
 * @fl6: flow of tunneled packet
 * @encap_limit: encapsulation limit
 * @pmtu: Path MTU is stored if packet is too big
 * @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
@@ -936,12 +998,9 @@ EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/
int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
         struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
         __u8 proto)
{
    struct ip6_tnl *t = netdev_priv(dev);
    struct net *net = t->net;
@@ -952,7 +1011,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
    struct net_device *tdev;
    int mtu;
    unsigned int max_headroom = sizeof(struct ipv6hdr);
u8 proto;
    int err = -1;

    /* NBMA tunnel */
@@ -1014,12 +1072,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        mtu = IPV6_MIN_MTU;
    if (skb_dst(skb))
        skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
    if (skb->len > mtu && !skb_is_gso(skb)) {
        *pmtu = mtu;
        err = -EMSGSIZE;
        goto tx_err_dst_release;
    }
if (t->err_count > 0) {
if (time_before(jiffies,
t->err_time + IP6TUNNEL_ERR_TIMEO)) {
t->err_count--;
dst_link_failure(skb);
} else {
t->err_count = 0;
}
}
    skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

    /*
@@ -1047,7 +1116,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
    skb->transport_header = skb->network_header;
proto = fl6->flowi6_proto;
    if (encap_limit >= 0) {
        init_tel_txopt(&opt, encap_limit);
        ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
@@ -1058,6 +1126,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        skb->encapsulation = 1;
    }
max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ dst->header_len;
if (max_headroom > dev->needed_headroom)
dev->needed_headroom = max_headroom;
    skb_push(skb, sizeof(struct ipv6hdr));
    skb_reset_network_header(skb);
    ipv6h = ipv6_hdr(skb);
@@ -1076,6 +1149,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
    dst_release(dst);
    return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);
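Compared with the old ip6_tnl_xmit2(), the renamed and exported ip6_tnl_xmit() picks up three things the GRE path needs: the next-header value arrives as an explicit proto argument instead of being read from fl6, GSO packets are exempted from the software MTU check (!skb_is_gso()), and the err_count/dst_link_failure() backoff plus the needed_headroom bookkeeping move in from the old ip6gre_xmit2().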
static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1099,7 +1173,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        encap_limit = t->parms.encap_limit;

    memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPIP;
    dsfield = ipv4_get_dsfield(iph);
@@ -1109,7 +1182,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
    if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
        fl6.flowi6_mark = skb->mark;
    err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
               IPPROTO_IPIP);
    if (err != 0) {
        /* XXX: send ICMP error even if DF is not set. */
        if (err == -EMSGSIZE)
@@ -1153,7 +1227,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        encap_limit = t->parms.encap_limit;

    memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPV6;
    dsfield = ipv6_get_dsfield(ipv6h);
    if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
@@ -1163,7 +1236,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
    if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
        fl6.flowi6_mark = skb->mark;
    err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
               IPPROTO_IPV6);
    if (err != 0) {
        if (err == -EMSGSIZE)
            icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -1174,7 +1248,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
}
static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ip6_tnl *t = netdev_priv(dev);
    struct net_device_stats *stats = &t->dev->stats;
@@ -1370,6 +1444,8 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
    struct net *net = t->net;
    struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
memset(&p1, 0, sizeof(p1));
    switch (cmd) {
    case SIOCGETTUNNEL:
        if (dev == ip6n->fb_tnl_dev) {
@@ -1464,8 +1540,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 *   %-EINVAL if mtu too small
 **/
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
    struct ip6_tnl *tnl = netdev_priv(dev);
@@ -1481,6 +1556,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
    dev->mtu = new_mtu;
    return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);
int ip6_tnl_get_iflink(const struct net_device *dev)
{
@@ -1493,7 +1569,7 @@ EXPORT_SYMBOL(ip6_tnl_get_iflink);
static const struct net_device_ops ip6_tnl_netdev_ops = {
    .ndo_init       = ip6_tnl_dev_init,
    .ndo_uninit     = ip6_tnl_dev_uninit,
    .ndo_start_xmit = ip6_tnl_start_xmit,
    .ndo_do_ioctl   = ip6_tnl_ioctl,
    .ndo_change_mtu = ip6_tnl_change_mtu,
    .ndo_get_stats  = ip6_get_stats,
@@ -1549,13 +1625,25 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
        return -ENOMEM;

    ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
    if (ret)
goto free_stats;
ret = gro_cells_init(&t->gro_cells, dev);
if (ret)
goto destroy_dst;
t->hlen = 0;
t->tun_hlen = 0;
return 0;
destroy_dst:
dst_cache_destroy(&t->dst_cache);
free_stats:
    free_percpu(dev->tstats);
    dev->tstats = NULL;
    return ret;
}
/**
...