Commit 3b19860c authored by David S. Miller

Merge branch 'bridge-per-vlan-dst_metadata-support'

Roopa Prabhu says:

====================
bridge: per vlan dst_metadata support

High-level summary:
lwt and dst_metadata have enabled vxlan l3 deployments
to use a single vxlan netdev for multiple vnis, eliminating the scalability
problem of one vxlan netdev per vni. This series tries to
do the same for vxlan netdevs in pure l2 bridged networks.
Use-case/deployment and details are below.

Deployment scenario details:
As we know, VXLAN is used to build layer 2 virtual networks across a
layer 3 underlay infrastructure. A VXLAN tunnel endpoint (VTEP)
originates and terminates VXLAN tunnels, and a VTEP can be a TOR switch
or a vswitch in the hypervisor. This patch series mainly
focuses on the TOR switch configured as a VTEP. The VXLAN segment ID (VNI)
along with the VLAN id is used to identify layer 2 segments in a VXLAN
overlay network. VXLAN bridging is the function provided by VTEPs to terminate
VXLAN tunnels and map a VXLAN VNI to a traditional end-host VLAN. This is
covered under "VXLAN Deployment Scenarios" in sections 6 and 6.1 of RFC 7348.
To provide the VXLAN bridging function, a VTEP has to map a VLAN to a VNI. The RFC
says that the ingress VTEP device shall remove the IEEE 802.1Q VLAN tag in
the original layer 2 packet, if there is one, before encapsulating the packet
into the VXLAN format to transmit it through the underlay network. The remote
VTEP devices have information about the VLAN in which the packet will be
placed based on their own VLAN-to-VXLAN-VNI mapping configurations.

Existing solution:
Without this patch series one can deploy such a vtep configuration by
adding the local ports and vxlan netdevs into a vlan filtering bridge.
The local ports are configured as trunk ports carrying all vlans.
A vxlan netdev per vni is added to the bridge. Vlan mapping to vni is
achieved by configuring the vlan as pvid on the corresponding vxlan netdev.
The vxlan netdev only receives traffic corresponding to the vlan it is mapped
to. This configuration maps traffic belonging to a vlan to the corresponding
vxlan segment.

          -----------------------------------
         |              bridge               |
         |                                   |
          -----------------------------------
            |100,200       |100 (pvid)    |200 (pvid)
            |              |              |
           swp1          vxlan1000      vxlan2000

This provides the required vxlan bridging function but poses a
scalability problem: a separate vxlan netdev is needed for each vni.

Solution in this patch series:
The goal is to use a single vxlan device to carry all vnis, similar
to the vxlan collect metadata mode, while additionally allowing the bridge
and vxlan driver to carry all the forwarding information and to learn.
This implementation uses the existing dst_metadata infrastructure to map
vlan to a tunnel id.
- vxlan driver changes:
    - enable collect metadata mode to be used with learning,
      replication and fdb
    - a single fdb table hashed by (mac, vni) (see the sketch after this list)
    - rx path already has the vni
    - tx path expects a vni in the packet with dst_metadata and relies
      on the learnt or static forwarding information table to forward the packet
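
A minimal sketch of the (mac, vni) keyed table idea follows. It is purely
illustrative: the structures, the toy hash and the chained buckets are
assumptions for explanation, not the vxlan driver's actual code.

/* Illustrative sketch: an fdb keyed by (mac, vni) lets one vxlan netdev
 * serve many vnis, because the vni is part of the lookup key instead of
 * being implied by a per-vni netdev. */
#include <stdint.h>
#include <string.h>

#define FDB_BUCKETS 256

struct fdb_key {
	uint8_t  mac[6];	/* learnt or configured destination mac */
	uint32_t vni;		/* vxlan segment the mac belongs to */
};

struct fdb_entry {
	struct fdb_key key;
	uint32_t remote_ip;	/* remote vtep address, network byte order */
	struct fdb_entry *next;
};

static struct fdb_entry *fdb[FDB_BUCKETS];

static unsigned int fdb_hash(const struct fdb_key *k)
{
	/* toy hash: fold the mac bytes and the vni into a bucket index */
	unsigned int h = k->vni;
	int i;

	for (i = 0; i < 6; i++)
		h = h * 31 + k->mac[i];
	return h % FDB_BUCKETS;
}

static struct fdb_entry *fdb_lookup(const uint8_t *mac, uint32_t vni)
{
	struct fdb_key k;
	struct fdb_entry *e;

	memcpy(k.mac, mac, sizeof(k.mac));
	k.vni = vni;
	for (e = fdb[fdb_hash(&k)]; e; e = e->next)
		if (e->key.vni == vni && !memcmp(e->key.mac, mac, sizeof(k.mac)))
			return e;
	return NULL;
}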

- Bridge driver changes: per vlan dst_metadata support:
    - our use case is vxlan and a 1-1 mapping between vlan and vni, but I have
      kept the api generic for any tunnel info
    - uapi to configure/unconfigure/dump per vlan tunnel data (a sketch of the
      netlink attribute layout follows this list)
    - new bridge port flag to turn this feature on/off; off by default
    - ingress hook:
        - if the port is a tunnel port, use the tunnel info in the
          attached dst_metadata to map it to a local vlan
    - egress hook:
        - if the port is a tunnel port, use the tunnel info attached to the
          vlan to set dst_metadata on the skb
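
For illustration only, here is a rough sketch of how a userspace tool might
pack the new IFLA_BRIDGE_VLAN_TUNNEL_INFO nest that the bridge expects inside
IFLA_AF_SPEC of an RTM_SETLINK request for a port. Only the attribute types
come from this series; the helper functions, buffer handling and the
surrounding rtnetlink plumbing are assumptions made for the example.

/* Hypothetical userspace sketch, not part of this series: build a nest that
 * maps vlan 100 to tunnel id (vni) 1000 on a bridge port. */
#include <string.h>
#include <linux/netlink.h>
#include <linux/if_bridge.h>

/* start a nested attribute; its length is fixed up as members are added */
static struct nlattr *nest_begin(char *buf, int *off, unsigned short type)
{
	struct nlattr *nest = (struct nlattr *)(buf + *off);

	nest->nla_type = NLA_F_NESTED | type;
	nest->nla_len = NLA_HDRLEN;
	*off += NLA_HDRLEN;
	return nest;
}

/* append one attribute and account for it in the enclosing nest */
static void put_attr(char *buf, int *off, struct nlattr *nest,
		     unsigned short type, const void *data, unsigned short len)
{
	struct nlattr *nla = (struct nlattr *)(buf + *off);

	nla->nla_type = type;
	nla->nla_len = NLA_HDRLEN + len;
	memcpy((char *)nla + NLA_HDRLEN, data, len);
	*off += NLA_ALIGN(nla->nla_len);
	nest->nla_len += NLA_ALIGN(nla->nla_len);
}

static void build_vlan_tunnel_map(char *buf, int *off)
{
	unsigned int tun_id = 1000;	/* IFLA_BRIDGE_VLAN_TUNNEL_ID is a u32 */
	unsigned short vid = 100;	/* IFLA_BRIDGE_VLAN_TUNNEL_VID is a u16 */
	struct nlattr *nest;

	nest = nest_begin(buf, off, IFLA_BRIDGE_VLAN_TUNNEL_INFO);
	put_attr(buf, off, nest, IFLA_BRIDGE_VLAN_TUNNEL_ID, &tun_id, sizeof(tun_id));
	put_attr(buf, off, nest, IFLA_BRIDGE_VLAN_TUNNEL_VID, &vid, sizeof(vid));
}

The same mapping can also be expressed as a vid/tunnel-id range by adding
IFLA_BRIDGE_VLAN_TUNNEL_FLAGS with BRIDGE_VLAN_INFO_RANGE_BEGIN/END on the
first and last entry, mirroring the existing bridge vlan range semantics.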

Other approaches tried and vetoed:
- tc vlan push/pop and tunnel metadata dst:
    - though tc can be used to do part of this, these patches address a deployment
      case where bridge driver vlan filtering and forwarding information
      database along with vxlan driver forwarding information table and learning
      are required.
- making vxlan driver understand vlan-vni mapping:
    - I had a series almost ready with this one but soon realized
      it duplicated a lot of vlan handling code in the vxlan driver
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5a0fd98b 11538d03
@@ -47,6 +47,7 @@ struct br_ip_list {
#define BR_PROXYARP_WIFI BIT(10)
#define BR_MCAST_FLOOD BIT(11)
#define BR_MULTICAST_TO_UNICAST BIT(12)
+#define BR_VLAN_TUNNEL BIT(13)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
@@ -58,6 +58,7 @@ struct ip_tunnel_key {
/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
+#define IP_TUNNEL_INFO_BRIDGE 0x04 /* represents a bridged tunnel id */
/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX \
@@ -118,6 +118,7 @@ enum {
IFLA_BRIDGE_FLAGS,
IFLA_BRIDGE_MODE,
IFLA_BRIDGE_VLAN_INFO,
+IFLA_BRIDGE_VLAN_TUNNEL_INFO,
__IFLA_BRIDGE_MAX,
};
#define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1)
@@ -134,6 +135,16 @@ struct bridge_vlan_info {
__u16 vid;
};
+enum {
+IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC,
+IFLA_BRIDGE_VLAN_TUNNEL_ID,
+IFLA_BRIDGE_VLAN_TUNNEL_VID,
+IFLA_BRIDGE_VLAN_TUNNEL_FLAGS,
+__IFLA_BRIDGE_VLAN_TUNNEL_MAX,
+};
+#define IFLA_BRIDGE_VLAN_TUNNEL_MAX (__IFLA_BRIDGE_VLAN_TUNNEL_MAX - 1)
struct bridge_vlan_xstats {
__u64 rx_bytes;
__u64 rx_packets;
@@ -322,6 +322,7 @@ enum {
IFLA_BRPORT_PAD,
IFLA_BRPORT_MCAST_FLOOD,
IFLA_BRPORT_MCAST_TO_UCAST,
+IFLA_BRPORT_VLAN_TUNNEL,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -26,6 +26,7 @@ enum {
NDA_IFINDEX,
NDA_MASTER,
NDA_LINK_NETNSID,
+NDA_SRC_VNI,
__NDA_MAX
};
@@ -6,7 +6,8 @@ obj-$(CONFIG_BRIDGE) += bridge.o
bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
br_ioctl.o br_stp.o br_stp_bpdu.o \
-br_stp_if.o br_stp_timer.o br_netlink.o
+br_stp_if.o br_stp_timer.o br_netlink.o \
+br_netlink_tunnel.o
bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
@@ -18,7 +19,7 @@ obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
-bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
+bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o
bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o
@@ -80,7 +80,7 @@ static void __br_forward(const struct net_bridge_port *to,
int br_hook;
vg = nbp_vlan_group_rcu(to);
-skb = br_handle_vlan(to->br, vg, skb);
+skb = br_handle_vlan(to->br, to, vg, skb);
if (!skb)
return;
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
+#include "br_private_tunnel.h"
/* Hook for brouter */
br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
@@ -57,7 +58,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
indev = skb->dev;
skb->dev = brdev;
-skb = br_handle_vlan(br, vg, skb);
+skb = br_handle_vlan(br, NULL, vg, skb);
if (!skb)
return NET_RX_DROP;
/* update the multicast stats if the packet is IGMP/MLD */
@@ -261,6 +262,11 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
p = br_port_get_rcu(skb->dev);
+if (p->flags & BR_VLAN_TUNNEL) {
+if (br_handle_ingress_vlan_tunnel(skb, p,
+nbp_vlan_group_rcu(p)))
+goto drop;
+}
if (unlikely(is_link_local_ether_addr(dest))) {
u16 fwd_mask = p->br->group_fwd_mask_required;
@@ -20,6 +20,7 @@
#include "br_private.h"
#include "br_private_stp.h"
+#include "br_private_tunnel.h"
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
u32 filter_mask)
@@ -95,9 +96,10 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
u32 filter_mask)
{
struct net_bridge_vlan_group *vg = NULL;
-struct net_bridge_port *p;
+struct net_bridge_port *p = NULL;
struct net_bridge *br;
int num_vlan_infos;
+size_t vinfo_sz = 0;
rcu_read_lock();
if (br_port_exists(dev)) {
@@ -110,8 +112,13 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
rcu_read_unlock();
+if (p && (p->flags & BR_VLAN_TUNNEL))
+vinfo_sz += br_get_vlan_tunnel_info_size(vg);
/* Each VLAN is returned in bridge_vlan_info along with flags */
-return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
+vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
+return vinfo_sz;
}
static inline size_t br_port_info_size(void)
@@ -128,6 +135,7 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
++ nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */
+ nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */
+ nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */
@@ -194,7 +202,9 @@ static int br_port_fill_attrs(struct sk_buff *skb,
nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
p->topology_change_ack) ||
-nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending))
+nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
+nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
+BR_VLAN_TUNNEL)))
return -EMSGSIZE;
timerval = br_timer_value(&p->message_age_timer);
@@ -417,6 +427,9 @@ static int br_fill_ifinfo(struct sk_buff *skb,
err = br_fill_ifvlaninfo_compressed(skb, vg);
else
err = br_fill_ifvlaninfo(skb, vg);
+if (port && (port->flags & BR_VLAN_TUNNEL))
+err = br_fill_vlan_tunnel_info(skb, vg);
rcu_read_unlock();
if (err)
goto nla_put_failure;
@@ -517,60 +530,91 @@ static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
return err;
}
-static int br_afspec(struct net_bridge *br,
-struct net_bridge_port *p,
-struct nlattr *af_spec,
-int cmd)
+static int br_process_vlan_info(struct net_bridge *br,
+struct net_bridge_port *p, int cmd,
+struct bridge_vlan_info *vinfo_curr,
+struct bridge_vlan_info **vinfo_last)
{
-struct bridge_vlan_info *vinfo_start = NULL;
-struct bridge_vlan_info *vinfo = NULL;
-struct nlattr *attr;
-int err = 0;
-int rem;
-nla_for_each_nested(attr, af_spec, rem) {
-if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
-continue;
-if (nla_len(attr) != sizeof(struct bridge_vlan_info))
-return -EINVAL;
-vinfo = nla_data(attr);
-if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
return -EINVAL;
-if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
-if (vinfo_start)
+if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+/* check if we are already processing a range */
+if (*vinfo_last)
return -EINVAL;
-vinfo_start = vinfo;
+*vinfo_last = vinfo_curr;
/* don't allow range of pvids */
-if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID)
+if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
return -EINVAL;
-continue;
+return 0;
}
-if (vinfo_start) {
+if (*vinfo_last) {
struct bridge_vlan_info tmp_vinfo;
-int v;
+int v, err;
-if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END))
+if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
return -EINVAL;
-if (vinfo->vid <= vinfo_start->vid)
+if (vinfo_curr->vid <= (*vinfo_last)->vid)
return -EINVAL;
-memcpy(&tmp_vinfo, vinfo_start,
+memcpy(&tmp_vinfo, *vinfo_last,
sizeof(struct bridge_vlan_info));
-for (v = vinfo_start->vid; v <= vinfo->vid; v++) {
+for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
tmp_vinfo.vid = v;
err = br_vlan_info(br, p, cmd, &tmp_vinfo);
if (err)
break;
}
-vinfo_start = NULL;
+*vinfo_last = NULL;
-} else {
-err = br_vlan_info(br, p, cmd, vinfo);
+return 0;
}
+return br_vlan_info(br, p, cmd, vinfo_curr);
+}
+static int br_afspec(struct net_bridge *br,
+struct net_bridge_port *p,
+struct nlattr *af_spec,
+int cmd)
+{
+struct bridge_vlan_info *vinfo_curr = NULL;
+struct bridge_vlan_info *vinfo_last = NULL;
+struct nlattr *attr;
+struct vtunnel_info tinfo_last = {};
+struct vtunnel_info tinfo_curr = {};
+int err = 0, rem;
+nla_for_each_nested(attr, af_spec, rem) {
+err = 0;
+switch (nla_type(attr)) {
+case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
+if (!(p->flags & BR_VLAN_TUNNEL))
+return -EINVAL;
+err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
if (err)
+return err;
+err = br_process_vlan_tunnel_info(br, p, cmd,
+&tinfo_curr,
+&tinfo_last);
+if (err)
+return err;
break;
+case IFLA_BRIDGE_VLAN_INFO:
+if (nla_len(attr) != sizeof(struct bridge_vlan_info))
+return -EINVAL;
+vinfo_curr = nla_data(attr);
+err = br_process_vlan_info(br, p, cmd, vinfo_curr,
+&vinfo_last);
+if (err)
+return err;
+break;
+}
+if (err)
+return err;
}
return err;
@@ -630,8 +674,9 @@ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
-int err;
unsigned long old_flags = p->flags;
+bool br_vlan_tunnel_old = false;
+int err;
br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
@@ -644,6 +689,11 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
+br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
+br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
+if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
+nbp_vlan_tunnel_info_flush(p);
if (tb[IFLA_BRPORT_COST]) {
err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
if (err)
/*
* Bridge per vlan tunnel port dst_metadata netlink control interface
*
* Authors:
* Roopa Prabhu <roopa@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>
#include <net/dst_metadata.h>
#include "br_private.h"
#include "br_private_tunnel.h"
static size_t __get_vlan_tinfo_size(void)
{
return nla_total_size(0) + /* nest IFLA_BRIDGE_VLAN_TUNNEL_INFO */
nla_total_size(sizeof(u32)) + /* IFLA_BRIDGE_VLAN_TUNNEL_ID */
nla_total_size(sizeof(u16)) + /* IFLA_BRIDGE_VLAN_TUNNEL_VID */
nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_VLAN_TUNNEL_FLAGS */
}
static bool vlan_tunnel_id_isrange(struct net_bridge_vlan *v,
struct net_bridge_vlan *v_end)
{
__be32 tunid_curr = tunnel_id_to_key32(v->tinfo.tunnel_id);
__be32 tunid_end = tunnel_id_to_key32(v_end->tinfo.tunnel_id);
return (be32_to_cpu(tunid_curr) - be32_to_cpu(tunid_end)) == 1;
}
static int __get_num_vlan_tunnel_infos(struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *v, *v_start = NULL, *v_end = NULL;
int num_tinfos = 0;
/* Count number of vlan infos */
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
/* only a context, bridge vlan not activated */
if (!br_vlan_should_use(v) || !v->tinfo.tunnel_id)
continue;
if (!v_start) {
goto initvars;
} else if ((v->vid - v_end->vid) == 1 &&
vlan_tunnel_id_isrange(v_end, v) == 1) {
v_end = v;
continue;
} else {
if ((v_end->vid - v->vid) > 0 &&
vlan_tunnel_id_isrange(v_end, v) > 0)
num_tinfos += 2;
else
num_tinfos += 1;
}
initvars:
v_start = v;
v_end = v;
}
if (v_start) {
if ((v_end->vid - v->vid) > 0 &&
vlan_tunnel_id_isrange(v_end, v) > 0)
num_tinfos += 2;
else
num_tinfos += 1;
}
return num_tinfos;
}
int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg)
{
int num_tinfos;
if (!vg)
return 0;
rcu_read_lock();
num_tinfos = __get_num_vlan_tunnel_infos(vg);
rcu_read_unlock();
return num_tinfos * __get_vlan_tinfo_size();
}
static int br_fill_vlan_tinfo(struct sk_buff *skb, u16 vid,
__be64 tunnel_id, u16 flags)
{
__be32 tid = tunnel_id_to_key32(tunnel_id);
struct nlattr *tmap;
tmap = nla_nest_start(skb, IFLA_BRIDGE_VLAN_TUNNEL_INFO);
if (!tmap)
return -EMSGSIZE;
if (nla_put_u32(skb, IFLA_BRIDGE_VLAN_TUNNEL_ID,
be32_to_cpu(tid)))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_VID,
vid))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_FLAGS,
flags))
goto nla_put_failure;
nla_nest_end(skb, tmap);
return 0;
nla_put_failure:
nla_nest_cancel(skb, tmap);
return -EMSGSIZE;
}
static int br_fill_vlan_tinfo_range(struct sk_buff *skb,
struct net_bridge_vlan *vtbegin,
struct net_bridge_vlan *vtend)
{
int err;
if (vtbegin && vtend && (vtend->vid - vtbegin->vid) > 0) {
/* add range to skb */
err = br_fill_vlan_tinfo(skb, vtbegin->vid,
vtbegin->tinfo.tunnel_id,
BRIDGE_VLAN_INFO_RANGE_BEGIN);
if (err)
return err;
err = br_fill_vlan_tinfo(skb, vtend->vid,
vtend->tinfo.tunnel_id,
BRIDGE_VLAN_INFO_RANGE_END);
if (err)
return err;
} else {
err = br_fill_vlan_tinfo(skb, vtbegin->vid,
vtbegin->tinfo.tunnel_id,
0);
if (err)
return err;
}
return 0;
}
int br_fill_vlan_tunnel_info(struct sk_buff *skb,
struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *vtbegin = NULL;
struct net_bridge_vlan *vtend = NULL;
struct net_bridge_vlan *v;
int err;
/* Count number of vlan infos */
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
/* only a context, bridge vlan not activated */
if (!br_vlan_should_use(v))
continue;
if (!v->tinfo.tunnel_dst)
continue;
if (!vtbegin) {
goto initvars;
} else if ((v->vid - vtend->vid) == 1 &&
vlan_tunnel_id_isrange(v, vtend)) {
vtend = v;
continue;
} else {
err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend);
if (err)
return err;
}
initvars:
vtbegin = v;
vtend = v;
}
if (vtbegin) {
err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend);
if (err)
return err;
}
return 0;
}
static const struct nla_policy vlan_tunnel_policy[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1] = {
[IFLA_BRIDGE_VLAN_TUNNEL_ID] = { .type = NLA_U32 },
[IFLA_BRIDGE_VLAN_TUNNEL_VID] = { .type = NLA_U16 },
[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS] = { .type = NLA_U16 },
};
static int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
u16 vid, u32 tun_id)
{
int err = 0;
if (!p)
return -EINVAL;
switch (cmd) {
case RTM_SETLINK:
err = nbp_vlan_tunnel_info_add(p, vid, tun_id);
break;
case RTM_DELLINK:
nbp_vlan_tunnel_info_delete(p, vid);
break;
}
return err;
}
int br_parse_vlan_tunnel_info(struct nlattr *attr,
struct vtunnel_info *tinfo)
{
struct nlattr *tb[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1];
u32 tun_id;
u16 vid, flags = 0;
int err;
memset(tinfo, 0, sizeof(*tinfo));
err = nla_parse_nested(tb, IFLA_BRIDGE_VLAN_TUNNEL_MAX,
attr, vlan_tunnel_policy);
if (err < 0)
return err;
if (!tb[IFLA_BRIDGE_VLAN_TUNNEL_ID] ||
!tb[IFLA_BRIDGE_VLAN_TUNNEL_VID])
return -EINVAL;
tun_id = nla_get_u32(tb[IFLA_BRIDGE_VLAN_TUNNEL_ID]);
vid = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]);
if (vid >= VLAN_VID_MASK)
return -ERANGE;
if (tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS])
flags = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS]);
tinfo->tunid = tun_id;
tinfo->vid = vid;
tinfo->flags = flags;
return 0;
}
int br_process_vlan_tunnel_info(struct net_bridge *br,
struct net_bridge_port *p, int cmd,
struct vtunnel_info *tinfo_curr,
struct vtunnel_info *tinfo_last)
{
int err;
if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
if (tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN)
return -EINVAL;
memcpy(tinfo_last, tinfo_curr, sizeof(struct vtunnel_info));
} else if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END) {
int t, v;
if (!(tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN))
return -EINVAL;
if ((tinfo_curr->vid - tinfo_last->vid) !=
(tinfo_curr->tunid - tinfo_last->tunid))
return -EINVAL;
t = tinfo_last->tunid;
for (v = tinfo_last->vid; v <= tinfo_curr->vid; v++) {
err = br_vlan_tunnel_info(p, cmd, v, t);
if (err)
return err;
t++;
}
memset(tinfo_last, 0, sizeof(struct vtunnel_info));
memset(tinfo_curr, 0, sizeof(struct vtunnel_info));
} else {
if (tinfo_last->flags)
return -EINVAL;
err = br_vlan_tunnel_info(p, cmd, tinfo_curr->vid,
tinfo_curr->tunid);
if (err)
return err;
memset(tinfo_last, 0, sizeof(struct vtunnel_info));
memset(tinfo_curr, 0, sizeof(struct vtunnel_info));
}
return 0;
}
@@ -91,6 +91,11 @@ struct br_vlan_stats {
struct u64_stats_sync syncp;
};
+struct br_tunnel_info {
+__be64 tunnel_id;
+struct metadata_dst *tunnel_dst;
+};
/**
* struct net_bridge_vlan - per-vlan entry
*
@@ -113,6 +118,7 @@ struct br_vlan_stats {
*/
struct net_bridge_vlan {
struct rhash_head vnode;
+struct rhash_head tnode;
u16 vid;
u16 flags;
struct br_vlan_stats __percpu *stats;
@@ -124,6 +130,9 @@ struct net_bridge_vlan {
atomic_t refcnt;
struct net_bridge_vlan *brvlan;
};
+struct br_tunnel_info tinfo;
struct list_head vlist;
struct rcu_head rcu;
@@ -145,6 +154,7 @@ struct net_bridge_vlan {
*/
struct net_bridge_vlan_group {
struct rhashtable vlan_hash;
+struct rhashtable tunnel_hash;
struct list_head vlan_list;
u16 num_vlans;
u16 pvid;
@@ -765,6 +775,7 @@ bool br_allowed_egress(struct net_bridge_vlan_group *vg,
const struct sk_buff *skb);
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
struct sk_buff *br_handle_vlan(struct net_bridge *br,
+const struct net_bridge_port *port,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb);
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
@@ -864,6 +875,7 @@ static inline bool br_should_learn(struct net_bridge_port *p,
}
static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
+const struct net_bridge_port *port,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb)
{
/*
* Bridge per vlan tunnels
*
* Authors:
* Roopa Prabhu <roopa@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _BR_PRIVATE_TUNNEL_H
#define _BR_PRIVATE_TUNNEL_H
struct vtunnel_info {
u32 tunid;
u16 vid;
u16 flags;
};
/* br_netlink_tunnel.c */
int br_parse_vlan_tunnel_info(struct nlattr *attr,
struct vtunnel_info *tinfo);
int br_process_vlan_tunnel_info(struct net_bridge *br,
struct net_bridge_port *p,
int cmd,
struct vtunnel_info *tinfo_curr,
struct vtunnel_info *tinfo_last);
int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg);
int br_fill_vlan_tunnel_info(struct sk_buff *skb,
struct net_bridge_vlan_group *vg);
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
/* br_vlan_tunnel.c */
int vlan_tunnel_init(struct net_bridge_vlan_group *vg);
void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg);
int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, u16 vid);
int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id);
void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port);
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan);
int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_port *p,
struct net_bridge_vlan_group *vg);
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan);
#else
static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
{
return 0;
}
static inline int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port,
u16 vid)
{
return 0;
}
static inline int nbp_vlan_tunnel_info_add(struct net_bridge_port *port,
u16 vid, u32 tun_id)
{
return 0;
}
static inline void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port)
{
}
static inline void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan)
{
}
static inline int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_port *p,
struct net_bridge_vlan_group *vg)
{
return 0;
}
#endif
#endif
@@ -5,6 +5,7 @@
#include <net/switchdev.h>
#include "br_private.h"
+#include "br_private_tunnel.h"
static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
@@ -310,6 +311,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
}
if (masterv != v) {
+vlan_tunnel_info_del(vg, v);
rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
br_vlan_rht_params);
__vlan_del_list(v);
@@ -325,6 +327,7 @@ static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
WARN_ON(!list_empty(&vg->vlan_list));
rhashtable_destroy(&vg->vlan_hash);
+vlan_tunnel_deinit(vg);
kfree(vg);
}
@@ -338,6 +341,7 @@ static void __vlan_flush(struct net_bridge_vlan_group *vg)
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
+const struct net_bridge_port *p,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb)
{
@@ -378,6 +382,12 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
skb->vlan_tci = 0;
+if (p && (p->flags & BR_VLAN_TUNNEL) &&
+br_handle_egress_vlan_tunnel(skb, v)) {
+kfree_skb(skb);
+return NULL;
+}
out:
return skb;
}
@@ -613,6 +623,8 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
br_fdb_delete_by_port(br, NULL, vid, 0);
+vlan_tunnel_info_del(vg, v);
return __vlan_del(v);
}
@@ -918,6 +930,9 @@ int br_vlan_init(struct net_bridge *br)
ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
if (ret)
goto err_rhtbl;
+ret = vlan_tunnel_init(vg);
+if (ret)
+goto err_tunnel_init;
INIT_LIST_HEAD(&vg->vlan_list);
br->vlan_proto = htons(ETH_P_8021Q);
br->default_pvid = 1;
@@ -932,6 +947,8 @@ int br_vlan_init(struct net_bridge *br)
return ret;
err_vlan_add:
+vlan_tunnel_deinit(vg);
+err_tunnel_init:
rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
kfree(vg);
@@ -961,6 +978,9 @@ int nbp_vlan_init(struct net_bridge_port *p)
ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
if (ret)
goto err_rhtbl;
+ret = vlan_tunnel_init(vg);
+if (ret)
+goto err_tunnel_init;
INIT_LIST_HEAD(&vg->vlan_list);
rcu_assign_pointer(p->vlgrp, vg);
if (p->br->default_pvid) {
@@ -976,8 +996,10 @@ int nbp_vlan_init(struct net_bridge_port *p)
err_vlan_add:
RCU_INIT_POINTER(p->vlgrp, NULL);
synchronize_rcu();
-rhashtable_destroy(&vg->vlan_hash);
+vlan_tunnel_deinit(vg);
err_vlan_enabled:
+err_tunnel_init:
+rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
kfree(vg);
/*
* Bridge per vlan tunnel port dst_metadata handling code
*
* Authors:
* Roopa Prabhu <roopa@cumulusnetworks.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>
#include <net/dst_metadata.h>
#include "br_private.h"
#include "br_private_tunnel.h"
static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct net_bridge_vlan *vle = ptr;
__be64 tunid = *(__be64 *)arg->key;
return vle->tinfo.tunnel_id != tunid;
}
static const struct rhashtable_params br_vlan_tunnel_rht_params = {
.head_offset = offsetof(struct net_bridge_vlan, tnode),
.key_offset = offsetof(struct net_bridge_vlan, tinfo.tunnel_id),
.key_len = sizeof(__be64),
.nelem_hint = 3,
.locks_mul = 1,
.obj_cmpfn = br_vlan_tunid_cmp,
.automatic_shrinking = true,
};
static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
u64 tunnel_id)
{
return rhashtable_lookup_fast(tbl, &tunnel_id,
br_vlan_tunnel_rht_params);
}
void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan)
{
if (!vlan->tinfo.tunnel_dst)
return;
rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
vlan->tinfo.tunnel_id = 0;
dst_release(&vlan->tinfo.tunnel_dst->dst);
vlan->tinfo.tunnel_dst = NULL;
}
static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan, u32 tun_id)
{
struct metadata_dst *metadata = NULL;
__be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
int err;
if (vlan->tinfo.tunnel_dst)
return -EEXIST;
metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
key, 0);
if (!metadata)
return -EINVAL;
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
vlan->tinfo.tunnel_dst = metadata;
vlan->tinfo.tunnel_id = key;
err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
br_vlan_tunnel_rht_params);
if (err)
goto out;
return 0;
out:
dst_release(&vlan->tinfo.tunnel_dst->dst);
return err;
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
ASSERT_RTNL();
vg = nbp_vlan_group(port);
vlan = br_vlan_find(vg, vid);
if (!vlan)
return -EINVAL;
return __vlan_tunnel_info_add(vg, vlan, tun_id);
}
/* Must be protected by RTNL.
* Must be called with vid in range from 1 to 4094 inclusive.
*/
int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, u16 vid)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
ASSERT_RTNL();
vg = nbp_vlan_group(port);
v = br_vlan_find(vg, vid);
if (!v)
return -ENOENT;
vlan_tunnel_info_del(vg, v);
return 0;
}
static void __vlan_tunnel_info_flush(struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *vlan, *tmp;
list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
vlan_tunnel_info_del(vg, vlan);
}
void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port)
{
struct net_bridge_vlan_group *vg;
ASSERT_RTNL();
vg = nbp_vlan_group(port);
__vlan_tunnel_info_flush(vg);
}
int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
{
return rhashtable_init(&vg->tunnel_hash, &br_vlan_tunnel_rht_params);
}
void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg)
{
rhashtable_destroy(&vg->tunnel_hash);
}
int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_port *p,
struct net_bridge_vlan_group *vg)
{
struct ip_tunnel_info *tinfo = skb_tunnel_info(skb);
struct net_bridge_vlan *vlan;
if (!vg || !tinfo)
return 0;
/* if already tagged, ignore */
if (skb_vlan_tagged(skb))
return 0;
/* lookup vid, given tunnel id */
vlan = br_vlan_tunnel_lookup(&vg->tunnel_hash, tinfo->key.tun_id);
if (!vlan)
return 0;
skb_dst_drop(skb);
__vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
return 0;
}
int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
struct net_bridge_vlan *vlan)
{
int err;
if (!vlan || !vlan->tinfo.tunnel_id)
return 0;
if (unlikely(!skb_vlan_tag_present(skb)))
return 0;
skb_dst_drop(skb);
err = skb_vlan_pop(skb);
if (err)
return err;
skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
return 0;
}