Commit d1ac3b16 authored by David S. Miller

Merge branch 'ipv6-tunnel-cleanups'

Tom Herbert says:

====================
net: Cleanup IPv6 ip tunnels

The IPv6 tunnel code is very different from the IPv4 code, and there is a
lot of redundancy between the two, particularly in the GRE tunneling.

This patch set cleans up the tunnel code to make the IPv6 code look
more like the IPv4 code and use common functions between the two
stacks where possible.

This work should make it easier to maintain and extend the IPv6 ip
tunnels.

Items in this patch set:
  - Cleanup IPv6 tunnel receive path (ip6_tnl_rcv). Includes using
    gro_cells and exporting ip6_tnl_rcv so that ip6_gre can call it
  - Move GRE functions to common header file (tx functions) or
    gre_demux.c (rx functions like gre_parse_header)
  - Call common GRE functions from IPv6 GRE
  - Create ip6_tnl_xmit (to be like ip_tunnel_xmit)

Tested:
  Ran super_netperf tests for TCP_RR and TCP_STREAM for:
    - IPv4 over gre, gretap, gre6, gre6tap
    - IPv6 over gre, gretap, gre6, gre6tap
    - ipip
    - ip6ip6
    - ipip/gue
    - IPv6 over gre/gue
    - IPv4 over gre/gue
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 570d6320 b05229f4
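
To illustrate the receive-path split described in the patch-set summary above, here is a minimal sketch (not part of the diff) of how a GRE receive handler uses the shared gre_parse_header() introduced in this series; the handler and delivery function names are hypothetical.

	/* Hypothetical receive handler showing the parse / pull / deliver split.
	 * my_gre_rcv() and my_deliver_to_tunnel() are illustrative names only.
	 */
	static int my_gre_rcv(struct sk_buff *skb)
	{
		struct tnl_ptk_info tpi;
		bool csum_err = false;
		int hdr_len;

		/* Common step 1: validate and decode the GRE base header and options. */
		if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0)
			goto drop;

		/* Common step 2: the caller, not the parser, pulls the header. */
		if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
			goto drop;

		/* Family-specific step 3: hand off to the IPv4 or IPv6 tunnel code. */
		return my_deliver_to_tunnel(skb, &tpi);

	drop:
		kfree_skb(skb);
		return 0;
	}
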
include/net/gre.h
@@ -25,4 +25,108 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					 u8 name_assign_type);
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+		     bool *csum_err, int *hdr_len);
+
+static inline int gre_calc_hlen(__be16 o_flags)
+{
+	int addend = 4;
+
+	if (o_flags & TUNNEL_CSUM)
+		addend += 4;
+	if (o_flags & TUNNEL_KEY)
+		addend += 4;
+	if (o_flags & TUNNEL_SEQ)
+		addend += 4;
+	return addend;
+}
+
+static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+	__be16 tflags = 0;
+
+	if (flags & GRE_CSUM)
+		tflags |= TUNNEL_CSUM;
+	if (flags & GRE_ROUTING)
+		tflags |= TUNNEL_ROUTING;
+	if (flags & GRE_KEY)
+		tflags |= TUNNEL_KEY;
+	if (flags & GRE_SEQ)
+		tflags |= TUNNEL_SEQ;
+	if (flags & GRE_STRICT)
+		tflags |= TUNNEL_STRICT;
+	if (flags & GRE_REC)
+		tflags |= TUNNEL_REC;
+	if (flags & GRE_VERSION)
+		tflags |= TUNNEL_VERSION;
+
+	return tflags;
+}
+
+static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags)
+{
+	__be16 flags = 0;
+
+	if (tflags & TUNNEL_CSUM)
+		flags |= GRE_CSUM;
+	if (tflags & TUNNEL_ROUTING)
+		flags |= GRE_ROUTING;
+	if (tflags & TUNNEL_KEY)
+		flags |= GRE_KEY;
+	if (tflags & TUNNEL_SEQ)
+		flags |= GRE_SEQ;
+	if (tflags & TUNNEL_STRICT)
+		flags |= GRE_STRICT;
+	if (tflags & TUNNEL_REC)
+		flags |= GRE_REC;
+	if (tflags & TUNNEL_VERSION)
+		flags |= GRE_VERSION;
+
+	return flags;
+}
+
+static inline __sum16 gre_checksum(struct sk_buff *skb)
+{
+	__wsum csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		csum = lco_csum(skb);
+	else
+		csum = skb_checksum(skb, 0, skb->len, 0);
+	return csum_fold(csum);
+}
+
+static inline void gre_build_header(struct sk_buff *skb, int hdr_len,
+				    __be16 flags, __be16 proto,
+				    __be32 key, __be32 seq)
+{
+	struct gre_base_hdr *greh;
+
+	skb_push(skb, hdr_len);
+
+	skb_reset_transport_header(skb);
+	greh = (struct gre_base_hdr *)skb->data;
+	greh->flags = gre_tnl_flags_to_gre_flags(flags);
+	greh->protocol = proto;
+
+	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+		if (flags & TUNNEL_SEQ) {
+			*ptr = seq;
+			ptr--;
+		}
+		if (flags & TUNNEL_KEY) {
+			*ptr = key;
+			ptr--;
+		}
+		if (flags & TUNNEL_CSUM &&
+		    !(skb_shinfo(skb)->gso_type &
+		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+			*ptr = 0;
+			*(__sum16 *)ptr = gre_checksum(skb);
+		}
+	}
+}
+
 #endif
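
For reference, a minimal sketch (not part of the diff) of how the transmit-side helpers above compose; the example function and the choice of flags are assumptions for illustration.

	/* Illustrative only: push a GRE header for a keyed, checksummed packet.
	 * The caller is assumed to have already routed the skb.
	 */
	static int example_push_gre(struct sk_buff *skb, __be32 key)
	{
		__be16 o_flags = TUNNEL_KEY | TUNNEL_CSUM;
		int hlen = gre_calc_hlen(o_flags);	/* 4 base + 4 csum + 4 key = 12 */

		if (skb_cow_head(skb, hlen))
			return -ENOMEM;

		gre_build_header(skb, hlen, o_flags, htons(ETH_P_TEB), key, 0);
		return 0;
	}
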
include/net/ip6_tunnel.h
@@ -42,6 +42,7 @@ struct ip6_tnl {
 	struct __ip6_tnl_parm parms;	/* tunnel configuration parameters */
 	struct flowi fl;	/* flowi template for xmit */
 	struct dst_cache dst_cache;	/* cached dst */
+	struct gro_cells gro_cells;
 
 	int err_count;
 	unsigned long err_time;
@@ -49,8 +50,10 @@ struct ip6_tnl {
 	/* These fields used only by GRE */
 	__u32 i_seqno;	/* The last seen seqno */
 	__u32 o_seqno;	/* The last output seqno */
-	int hlen;	/* Precalculated GRE header length */
+	int hlen;	/* tun_hlen + encap_hlen */
+	int tun_hlen;	/* Precalculated header length */
 	int mlink;
 };
 
 /* Tunnel encapsulation limit destination sub-option */
@@ -63,13 +66,19 @@ struct ipv6_tlv_tnl_enc_lim {
 int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		const struct in6_addr *raddr);
+int ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
+		const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+		bool log_ecn_error);
 int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
		     const struct in6_addr *raddr);
+int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu, __u8 proto);
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
 __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
		      const struct in6_addr *raddr);
 struct net *ip6_tnl_get_link_net(const struct net_device *dev);
 int ip6_tnl_get_iflink(const struct net_device *dev);
+int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
 
 #ifdef CONFIG_INET
 static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
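
A sketch (not part of the diff) of how the newly exported ip6_tnl_rcv() is intended to be called from the IPv6 GRE receive path; the tunnel lookup helper below is a hypothetical stand-in for the driver's own lookup.

	/* Sketch: an IPv6 GRE receive built on the exported ip6_tnl_rcv().
	 * my_ip6gre_tunnel_lookup() is illustrative only.
	 */
	static int my_ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
	{
		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
		struct ip6_tnl *tunnel;

		tunnel = my_ip6gre_tunnel_lookup(skb->dev, &ipv6h->saddr,
						 &ipv6h->daddr, tpi->key, tpi->proto);
		if (!tunnel)
			return PACKET_REJECT;

		/* Common IPv6 tunnel receive: stats, ECN handling, gro_cells. */
		ip6_tnl_rcv(tunnel, skb, tpi, NULL, true);
		return PACKET_RCVD;
	}
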
net/ipv4/gre_demux.c
@@ -60,6 +60,70 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
+int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+		     bool *csum_err, int *ret_hdr_len)
+{
+	const struct gre_base_hdr *greh;
+	__be32 *options;
+	int hdr_len;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+		return -EINVAL;
+
+	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+	hdr_len = gre_calc_hlen(tpi->flags);
+
+	if (!pskb_may_pull(skb, hdr_len))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	tpi->proto = greh->protocol;
+
+	options = (__be32 *)(greh + 1);
+	if (greh->flags & GRE_CSUM) {
+		if (skb_checksum_simple_validate(skb)) {
+			*csum_err = true;
+			return -EINVAL;
+		}
+
+		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+					 null_compute_pseudo);
+		options++;
+	}
+
+	if (greh->flags & GRE_KEY) {
+		tpi->key = *options;
+		options++;
+	} else {
+		tpi->key = 0;
+	}
+	if (unlikely(greh->flags & GRE_SEQ)) {
+		tpi->seq = *options;
+		options++;
+	} else {
+		tpi->seq = 0;
+	}
+	/* WCCP version 1 and 2 protocol decoding.
+	 * - Change protocol to IP
+	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
+	 */
+	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+		tpi->proto = htons(ETH_P_IP);
+		if ((*(u8 *)options & 0xF0) != 0x40) {
+			hdr_len += 4;
+			if (!pskb_may_pull(skb, hdr_len))
+				return -EINVAL;
+		}
+	}
+
+	*ret_hdr_len = hdr_len;
+	return 0;
+}
+EXPORT_SYMBOL(gre_parse_header);
+
 static int gre_rcv(struct sk_buff *skb)
 {
 	const struct gre_protocol *proto;
net/ipv4/ip_gre.c
@@ -122,125 +122,6 @@ static int ipgre_tunnel_init(struct net_device *dev);
 static int ipgre_net_id __read_mostly;
 static int gre_tap_net_id __read_mostly;
 
-static int ip_gre_calc_hlen(__be16 o_flags)
-{
-	int addend = 4;
-
-	if (o_flags & TUNNEL_CSUM)
-		addend += 4;
-	if (o_flags & TUNNEL_KEY)
-		addend += 4;
-	if (o_flags & TUNNEL_SEQ)
-		addend += 4;
-	return addend;
-}
-
-static __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
-	__be16 tflags = 0;
-
-	if (flags & GRE_CSUM)
-		tflags |= TUNNEL_CSUM;
-	if (flags & GRE_ROUTING)
-		tflags |= TUNNEL_ROUTING;
-	if (flags & GRE_KEY)
-		tflags |= TUNNEL_KEY;
-	if (flags & GRE_SEQ)
-		tflags |= TUNNEL_SEQ;
-	if (flags & GRE_STRICT)
-		tflags |= TUNNEL_STRICT;
-	if (flags & GRE_REC)
-		tflags |= TUNNEL_REC;
-	if (flags & GRE_VERSION)
-		tflags |= TUNNEL_VERSION;
-
-	return tflags;
-}
-
-static __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
-	__be16 flags = 0;
-
-	if (tflags & TUNNEL_CSUM)
-		flags |= GRE_CSUM;
-	if (tflags & TUNNEL_ROUTING)
-		flags |= GRE_ROUTING;
-	if (tflags & TUNNEL_KEY)
-		flags |= GRE_KEY;
-	if (tflags & TUNNEL_SEQ)
-		flags |= GRE_SEQ;
-	if (tflags & TUNNEL_STRICT)
-		flags |= GRE_STRICT;
-	if (tflags & TUNNEL_REC)
-		flags |= GRE_REC;
-	if (tflags & TUNNEL_VERSION)
-		flags |= GRE_VERSION;
-
-	return flags;
-}
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-			    bool *csum_err)
-{
-	const struct gre_base_hdr *greh;
-	__be32 *options;
-	int hdr_len;
-
-	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
-		return -EINVAL;
-
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
-		return -EINVAL;
-
-	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
-	hdr_len = ip_gre_calc_hlen(tpi->flags);
-
-	if (!pskb_may_pull(skb, hdr_len))
-		return -EINVAL;
-
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-	tpi->proto = greh->protocol;
-
-	options = (__be32 *)(greh + 1);
-	if (greh->flags & GRE_CSUM) {
-		if (skb_checksum_simple_validate(skb)) {
-			*csum_err = true;
-			return -EINVAL;
-		}
-
-		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
-					 null_compute_pseudo);
-		options++;
-	}
-
-	if (greh->flags & GRE_KEY) {
-		tpi->key = *options;
-		options++;
-	} else {
-		tpi->key = 0;
-	}
-	if (unlikely(greh->flags & GRE_SEQ)) {
-		tpi->seq = *options;
-		options++;
-	} else {
-		tpi->seq = 0;
-	}
-	/* WCCP version 1 and 2 protocol decoding.
-	 * - Change protocol to IP
-	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-	 */
-	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
-		tpi->proto = htons(ETH_P_IP);
-		if ((*(u8 *)options & 0xF0) != 0x40) {
-			hdr_len += 4;
-			if (!pskb_may_pull(skb, hdr_len))
-				return -EINVAL;
-		}
-	}
-
-	return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
-}
-
 static void ipgre_err(struct sk_buff *skb, u32 info,
 		      const struct tnl_ptk_info *tpi)
 {
@@ -340,12 +221,16 @@ static void gre_err(struct sk_buff *skb, u32 info)
 	const int code = icmp_hdr(skb)->code;
 	struct tnl_ptk_info tpi;
 	bool csum_err = false;
+	int hdr_len;
 
-	if (parse_gre_header(skb, &tpi, &csum_err)) {
+	if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len)) {
 		if (!csum_err)		/* ignore csum errors. */
 			return;
 	}
 
+	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
+		return;
+
 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
 		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
 				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
@@ -419,6 +304,7 @@ static int gre_rcv(struct sk_buff *skb)
 {
 	struct tnl_ptk_info tpi;
 	bool csum_err = false;
+	int hdr_len;
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -428,7 +314,10 @@ static int gre_rcv(struct sk_buff *skb)
 	}
 #endif
 
-	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+	if (gre_parse_header(skb, &tpi, &csum_err, &hdr_len) < 0)
+		goto drop;
+
+	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
 		goto drop;
 
 	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
@@ -440,49 +329,6 @@ static int gre_rcv(struct sk_buff *skb)
 	return 0;
 }
 
-static __sum16 gre_checksum(struct sk_buff *skb)
-{
-	__wsum csum;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		csum = lco_csum(skb);
-	else
-		csum = skb_checksum(skb, 0, skb->len, 0);
-	return csum_fold(csum);
-}
-
-static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
-			 __be16 proto, __be32 key, __be32 seq)
-{
-	struct gre_base_hdr *greh;
-
-	skb_push(skb, hdr_len);
-
-	skb_reset_transport_header(skb);
-	greh = (struct gre_base_hdr *)skb->data;
-	greh->flags = tnl_flags_to_gre_flags(flags);
-	greh->protocol = proto;
-
-	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
-		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
-		if (flags & TUNNEL_SEQ) {
-			*ptr = seq;
-			ptr--;
-		}
-		if (flags & TUNNEL_KEY) {
-			*ptr = key;
-			ptr--;
-		}
-		if (flags & TUNNEL_CSUM &&
-		    !(skb_shinfo(skb)->gso_type &
-		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
-			*ptr = 0;
-			*(__sum16 *)ptr = gre_checksum(skb);
-		}
-	}
-}
-
 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 		       const struct iphdr *tnl_params,
 		       __be16 proto)
@@ -493,8 +339,9 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 	tunnel->o_seqno++;
 
 	/* Push GRE header. */
-	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
-		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+	gre_build_header(skb, tunnel->tun_hlen,
+			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
+			 htonl(tunnel->o_seqno));
 
 	skb_set_inner_protocol(skb, proto);
 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
@@ -552,7 +399,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
					  fl.saddr);
 	}
 
-	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
+	tunnel_hlen = gre_calc_hlen(key->tun_flags);
 
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ tunnel_hlen + sizeof(struct iphdr);
@@ -571,7 +418,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_free_rt;
 
 	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
-	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
-		     tunnel_id_to_key(tun_info->key.tun_id), 0);
+	gre_build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+			 tunnel_id_to_key(tun_info->key.tun_id), 0);
 
 	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
@@ -694,8 +541,8 @@ static int ipgre_tunnel_ioctl(struct net_device *dev,
 	if (err)
 		return err;
 
-	p.i_flags = tnl_flags_to_gre_flags(p.i_flags);
-	p.o_flags = tnl_flags_to_gre_flags(p.o_flags);
+	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
+	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);
 
 	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
 		return -EFAULT;
@@ -739,7 +586,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
 	greh = (struct gre_base_hdr *)(iph+1);
-	greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags);
+	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
 	greh->protocol = htons(type);
 
 	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
@@ -840,7 +687,7 @@ static void __gre_tunnel_init(struct net_device *dev)
 	int t_hlen;
 
 	tunnel = netdev_priv(dev);
-	tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
+	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
 	tunnel->parms.iph.protocol = IPPROTO_GRE;
 
 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
@@ -1155,8 +1002,10 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	struct ip_tunnel_parm *p = &t->parms;
 
 	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
-	    nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
-	    nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
+	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
+			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
+	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
+			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
 	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
 	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
 	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
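
As a quick sanity illustration (not part of the diff), the renamed conversion helpers remain inverses of each other for the flag bits user space actually configures:

	/* Illustrative only: internal TUNNEL_* flags and on-the-wire GRE_* flags
	 * round-trip through the renamed helpers for csum/key/seq bits.
	 */
	static bool example_gre_flags_roundtrip(void)
	{
		__be16 tnl = TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ;
		__be16 wire = gre_tnl_flags_to_gre_flags(tnl);

		return gre_flags_to_tnl_flags(wire) == tnl;	/* expected: true */
	}
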
(Diffs for the remaining files in this series are collapsed in this view.)