Commit e21268ef authored by Vladimir Oltean, committed by Jakub Kicinski

net: dsa: felix: perform switch setup for tag_8021q

Unlike sja1105, the only other user of the software-defined tag_8021q.c
tagger format, the implementation we choose for the Felix DSA switch
driver preserves full functionality under a vlan_filtering bridge
(i.e. IP termination works through the DSA user ports under all
circumstances).

The tag_8021q protocol just wants:
- Identifying the ingress switch port based on the RX VLAN ID, as seen
  by the CPU. We achieve this by using the TCAM engines (which are also
  used for tc-flower offload) to push the RX VLAN as a second, outer
  tag, on egress towards the CPU port.
- Steering traffic injected into the switch from the network stack
  towards the correct front port based on the TX VLAN, and consuming
  (popping) that header on the switch's egress.

A tc-flower pseudocode of the static configuration done by the driver
would look like this:

$ tc qdisc add dev <cpu-port> clsact
$ for eth in swp0 swp1 swp2 swp3; do \
	tc filter add dev <cpu-port> egress flower indev ${eth} \
		action vlan push id <rxvlan> protocol 802.1ad; \
	tc filter add dev <cpu-port> ingress protocol 802.1Q flower \
		vlan_id <txvlan> action vlan pop \
		action mirred egress redirect dev ${eth}; \
done

but of course since DSA does not register network interfaces for the CPU
port, this configuration would be impossible for the user to do. Also,
due to the same reason, it is impossible for the user to inadvertently
delete these rules using tc. These rules do not collide in any way with
tc-flower, they just consume some TCAM space, which is something we can
live with.
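
For completeness, the tagger itself is selected at runtime through the standard
DSA "tagging" sysfs attribute rather than through tc. A rough sketch of that
workflow is below; the master interface name (eno2) and the tagger string
(ocelot-8021q) are illustrative assumptions, and the user ports typically need
to be brought down while the protocol is changed:

$ for eth in swp0 swp1 swp2 swp3; do ip link set $eth down; done
$ echo ocelot-8021q > /sys/class/net/eno2/dsa/tagging
$ for eth in swp0 swp1 swp2 swp3; do ip link set $eth up; done
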
Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 7c83a7c5
@@ -13,6 +13,7 @@
 #include <soc/mscc/ocelot_ana.h>
 #include <soc/mscc/ocelot_ptp.h>
 #include <soc/mscc/ocelot.h>
+#include <linux/dsa/8021q.h>
 #include <linux/platform_device.h>
 #include <linux/packing.h>
 #include <linux/module.h>
@@ -24,6 +25,329 @@
 #include <net/dsa.h>
 #include "felix.h"
 
+static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
+				       bool pvid, bool untagged)
+{
+	struct ocelot_vcap_filter *outer_tagging_rule;
+	struct ocelot *ocelot = &felix->ocelot;
+	struct dsa_switch *ds = felix->ds;
+	int key_length, upstream, err;
+
+	/* We don't need to install the rxvlan into the other ports' filtering
+	 * tables, because we're just pushing the rxvlan when sending towards
+	 * the CPU
+	 */
+	if (!pvid)
+		return 0;
+
+	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
+	upstream = dsa_upstream_port(ds, port);
+
+	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
+				     GFP_KERNEL);
+	if (!outer_tagging_rule)
+		return -ENOMEM;
+
+	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
+	outer_tagging_rule->prio = 1;
+	outer_tagging_rule->id.cookie = port;
+	outer_tagging_rule->id.tc_offload = false;
+	outer_tagging_rule->block_id = VCAP_ES0;
+	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+	outer_tagging_rule->lookup = 0;
+	outer_tagging_rule->ingress_port.value = port;
+	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
+	outer_tagging_rule->egress_port.value = upstream;
+	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
+	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
+	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
+	outer_tagging_rule->action.tag_a_vid_sel = 1;
+	outer_tagging_rule->action.vid_a_val = vid;
+
+	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
+	if (err)
+		kfree(outer_tagging_rule);
+
+	return err;
+}
+
+static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
+				      bool pvid, bool untagged)
+{
+	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+	struct ocelot *ocelot = &felix->ocelot;
+	struct dsa_switch *ds = felix->ds;
+	int upstream, err;
+
+	/* tag_8021q.c assumes we are implementing this via port VLAN
+	 * membership, which we aren't. So we don't need to add any VCAP filter
+	 * for the CPU port.
+	 */
+	if (ocelot->ports[port]->is_dsa_8021q_cpu)
+		return 0;
+
+	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
+	if (!untagging_rule)
+		return -ENOMEM;
+
+	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
+	if (!redirect_rule) {
+		kfree(untagging_rule);
+		return -ENOMEM;
+	}
+
+	upstream = dsa_upstream_port(ds, port);
+
+	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
+	untagging_rule->ingress_port_mask = BIT(upstream);
+	untagging_rule->vlan.vid.value = vid;
+	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
+	untagging_rule->prio = 1;
+	untagging_rule->id.cookie = port;
+	untagging_rule->id.tc_offload = false;
+	untagging_rule->block_id = VCAP_IS1;
+	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+	untagging_rule->lookup = 0;
+	untagging_rule->action.vlan_pop_cnt_ena = true;
+	untagging_rule->action.vlan_pop_cnt = 1;
+	untagging_rule->action.pag_override_mask = 0xff;
+	untagging_rule->action.pag_val = port;
+
+	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
+	if (err) {
+		kfree(untagging_rule);
+		kfree(redirect_rule);
+		return err;
+	}
+
+	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
+	redirect_rule->ingress_port_mask = BIT(upstream);
+	redirect_rule->pag = port;
+	redirect_rule->prio = 1;
+	redirect_rule->id.cookie = port;
+	redirect_rule->id.tc_offload = false;
+	redirect_rule->block_id = VCAP_IS2;
+	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
+	redirect_rule->lookup = 0;
+	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
+	redirect_rule->action.port_mask = BIT(port);
+
+	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
+	if (err) {
+		ocelot_vcap_filter_del(ocelot, untagging_rule);
+		kfree(redirect_rule);
+		return err;
+	}
+
+	return 0;
+}
+
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+				    u16 flags)
+{
+	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
+	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+	struct ocelot *ocelot = ds->priv;
+
+	if (vid_is_dsa_8021q_rxvlan(vid))
+		return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
+						  port, vid, pvid, untagged);
+
+	if (vid_is_dsa_8021q_txvlan(vid))
+		return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
+						  port, vid, pvid, untagged);
+
+	return 0;
+}
+
+static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
+{
+	struct ocelot_vcap_filter *outer_tagging_rule;
+	struct ocelot_vcap_block *block_vcap_es0;
+	struct ocelot *ocelot = &felix->ocelot;
+
+	block_vcap_es0 = &ocelot->block[VCAP_ES0];
+
+	outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+								 port, false);
+	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
+	 * installing outer tagging ES0 rules where they weren't needed.
+	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
+	 * so that forces us to be slightly sloppy here, and just assume that
+	 * if we didn't find an outer_tagging_rule it means that there was
+	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
+	 * port. This is most probably true though.
+	 */
+	if (!outer_tagging_rule)
+		return 0;
+
+	return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
+{
+	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+	struct ocelot_vcap_block *block_vcap_is1;
+	struct ocelot_vcap_block *block_vcap_is2;
+	struct ocelot *ocelot = &felix->ocelot;
+	int err;
+
+	if (ocelot->ports[port]->is_dsa_8021q_cpu)
+		return 0;
+
+	block_vcap_is1 = &ocelot->block[VCAP_IS1];
+	block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+	untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
+							     port, false);
+	if (!untagging_rule)
+		return 0;
+
+	err = ocelot_vcap_filter_del(ocelot, untagging_rule);
+	if (err)
+		return err;
+
+	redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
+							    port, false);
+	if (!redirect_rule)
+		return 0;
+
+	return ocelot_vcap_filter_del(ocelot, redirect_rule);
+}
+
+static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+	struct ocelot *ocelot = ds->priv;
+
+	if (vid_is_dsa_8021q_rxvlan(vid))
+		return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
+						  port, vid);
+
+	if (vid_is_dsa_8021q_txvlan(vid))
+		return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
+						  port, vid);
+
+	return 0;
+}
+
+static const struct dsa_8021q_ops felix_tag_8021q_ops = {
+	.vlan_add	= felix_tag_8021q_vlan_add,
+	.vlan_del	= felix_tag_8021q_vlan_del,
+};
+
+/* Alternatively to using the NPI functionality, that same hardware MAC
+ * connected internally to the enetc or fman DSA master can be configured to
+ * use the software-defined tag_8021q frame format. As far as the hardware is
+ * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
+ * module are now disconnected from it, but can still be accessed through
+ * register-based MMIO.
+ */
+static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
+{
+	ocelot->ports[port]->is_dsa_8021q_cpu = true;
+	ocelot->npi = -1;
+
+	/* Overwrite PGID_CPU with the non-tagging port */
+	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
+
+	ocelot_apply_bridge_fwd_mask(ocelot);
+}
+
+static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
+{
+	ocelot->ports[port]->is_dsa_8021q_cpu = false;
+
+	/* Restore PGID_CPU */
+	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
+			 PGID_CPU);
+
+	ocelot_apply_bridge_fwd_mask(ocelot);
+}
+
+static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
+{
+	struct ocelot *ocelot = ds->priv;
+	struct felix *felix = ocelot_to_felix(ocelot);
+	unsigned long cpu_flood;
+	int port, err;
+
+	felix_8021q_cpu_port_init(ocelot, cpu);
+
+	for (port = 0; port < ds->num_ports; port++) {
+		if (dsa_is_unused_port(ds, port))
+			continue;
+
+		/* This overwrites ocelot_init():
+		 * Do not forward BPDU frames to the CPU port module,
+		 * for 2 reasons:
+		 * - When these packets are injected from the tag_8021q
+		 *   CPU port, we want them to go out, not loop back
+		 *   into the system.
+		 * - STP traffic ingressing on a user port should go to
+		 *   the tag_8021q CPU port, not to the hardware CPU
+		 *   port module.
+		 */
+		ocelot_write_gix(ocelot,
+				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
+				 ANA_PORT_CPU_FWD_BPDU_CFG, port);
+	}
+
+	/* In tag_8021q mode, the CPU port module is unused. So we
+	 * want to disable flooding of any kind to the CPU port module,
+	 * since packets going there will end in a black hole.
+	 */
+	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
+	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
+	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
+
+	felix->dsa_8021q_ctx = kzalloc(sizeof(*felix->dsa_8021q_ctx),
+				       GFP_KERNEL);
+	if (!felix->dsa_8021q_ctx)
+		return -ENOMEM;
+
+	felix->dsa_8021q_ctx->ops = &felix_tag_8021q_ops;
+	felix->dsa_8021q_ctx->proto = htons(ETH_P_8021AD);
+	felix->dsa_8021q_ctx->ds = ds;
+
+	err = dsa_8021q_setup(felix->dsa_8021q_ctx, true);
+	if (err)
+		goto out_free_dsa_8021_ctx;
+
+	return 0;
+
+out_free_dsa_8021_ctx:
+	kfree(felix->dsa_8021q_ctx);
+	return err;
+}
+
+static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
+{
+	struct ocelot *ocelot = ds->priv;
+	struct felix *felix = ocelot_to_felix(ocelot);
+	int err, port;
+
+	err = dsa_8021q_setup(felix->dsa_8021q_ctx, false);
+	if (err)
+		dev_err(ds->dev, "dsa_8021q_setup returned %d", err);
+
+	kfree(felix->dsa_8021q_ctx);
+
+	for (port = 0; port < ds->num_ports; port++) {
+		if (dsa_is_unused_port(ds, port))
+			continue;
+
+		/* Restore the logic from ocelot_init:
+		 * do not forward BPDU frames to the front ports.
+		 */
+		ocelot_write_gix(ocelot,
+				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
+				 ANA_PORT_CPU_FWD_BPDU_CFG,
+				 port);
+	}
+
+	felix_8021q_cpu_port_deinit(ocelot, cpu);
+}
+
 /* The CPU port module is connected to the Node Processor Interface (NPI). This
  * is the mode through which frames can be injected from and extracted to an
  * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
@@ -107,6 +431,9 @@ static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
 	case DSA_TAG_PROTO_OCELOT:
 		err = felix_setup_tag_npi(ds, cpu);
 		break;
+	case DSA_TAG_PROTO_OCELOT_8021Q:
+		err = felix_setup_tag_8021q(ds, cpu);
+		break;
 	default:
 		err = -EPROTONOSUPPORT;
 	}
@@ -121,11 +448,18 @@ static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
 	case DSA_TAG_PROTO_OCELOT:
 		felix_teardown_tag_npi(ds, cpu);
 		break;
+	case DSA_TAG_PROTO_OCELOT_8021Q:
+		felix_teardown_tag_8021q(ds, cpu);
+		break;
 	default:
 		break;
 	}
 }
 
+/* This always leaves the switch in a consistent state, because although the
+ * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
+ * or the restoration is guaranteed to work.
+ */
 static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
 				     enum dsa_tag_protocol proto)
 {
@@ -134,7 +468,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
 	enum dsa_tag_protocol old_proto = felix->tag_proto;
 	int err;
 
-	if (proto != DSA_TAG_PROTO_OCELOT)
+	if (proto != DSA_TAG_PROTO_OCELOT &&
+	    proto != DSA_TAG_PROTO_OCELOT_8021Q)
 		return -EPROTONOSUPPORT;
 
 	felix_del_tag_protocol(ds, cpu, old_proto);
...
@@ -48,6 +48,7 @@ struct felix {
 	struct lynx_pcs			**pcs;
 	resource_size_t			switch_base;
 	resource_size_t			imdio_base;
+	struct dsa_8021q_context	*dsa_8021q_ctx;
 	enum dsa_tag_protocol		tag_proto;
 };
...
@@ -889,18 +889,60 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL(ocelot_get_ts_info);
 
-static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
+static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
 {
+	u32 mask = 0;
 	int port;
 
+	for (port = 0; port < ocelot->num_phys_ports; port++) {
+		struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+		if (!ocelot_port)
+			continue;
+
+		if (ocelot_port->is_dsa_8021q_cpu)
+			mask |= BIT(port);
+	}
+
+	return mask;
+}
+
+void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
+{
+	unsigned long cpu_fwd_mask;
+	int port;
+
+	/* If a DSA tag_8021q CPU exists, it needs to be included in the
+	 * regular forwarding path of the front ports regardless of whether
+	 * those are bridged or standalone.
+	 * If DSA tag_8021q is not used, this returns 0, which is fine because
+	 * the hardware-based CPU port module can be a destination for packets
+	 * even if it isn't part of PGID_SRC.
+	 */
+	cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot);
+
 	/* Apply FWD mask. The loop is needed to add/remove the current port as
 	 * a source for the other ports.
 	 */
 	for (port = 0; port < ocelot->num_phys_ports; port++) {
-		if (ocelot->bridge_fwd_mask & BIT(port)) {
-			unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port);
+		struct ocelot_port *ocelot_port = ocelot->ports[port];
+		unsigned long mask;
+
+		if (!ocelot_port) {
+			/* Unused ports can't send anywhere */
+			mask = 0;
+		} else if (ocelot_port->is_dsa_8021q_cpu) {
+			/* The DSA tag_8021q CPU ports need to be able to
+			 * forward packets to all other ports except for
+			 * themselves
+			 */
+			mask = GENMASK(ocelot->num_phys_ports - 1, 0);
+			mask &= ~cpu_fwd_mask;
+		} else if (ocelot->bridge_fwd_mask & BIT(port)) {
 			int lag;
 
+			mask = ocelot->bridge_fwd_mask & ~BIT(port);
+
 			for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
 				unsigned long bond_mask = ocelot->lags[lag];
 
@@ -912,15 +954,18 @@ static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
 					break;
 				}
 			}
-
-			ocelot_write_rix(ocelot, mask,
-					 ANA_PGID_PGID, PGID_SRC + port);
 		} else {
-			ocelot_write_rix(ocelot, 0,
-					 ANA_PGID_PGID, PGID_SRC + port);
+			/* Standalone ports forward only to DSA tag_8021q CPU
+			 * ports (if those exist), or to the hardware CPU port
+			 * module otherwise.
+			 */
+			mask = cpu_fwd_mask;
 		}
+
+		ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
 	}
 }
+EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
 
 void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
 {
...
@@ -1009,6 +1009,7 @@ ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie,
 	return NULL;
 }
+EXPORT_SYMBOL(ocelot_vcap_block_find_filter_by_id);
 
 /* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
  * on destination and source MAC addresses, but only on higher-level protocol
...
@@ -14,9 +14,6 @@
 int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
 				    struct ocelot_vcap_filter *rule);
-struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id,
-				    bool tc_offload);
 
 void ocelot_detect_vcap_constants(struct ocelot *ocelot);
 int ocelot_vcap_init(struct ocelot *ocelot);
...
@@ -610,6 +610,7 @@ struct ocelot_port {
 	phy_interface_t			phy_mode;
 	u8				*xmit_template;
+	bool				is_dsa_8021q_cpu;
 };
 
 struct ocelot {
@@ -760,6 +761,7 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
 			struct phy_device *phydev);
 int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled);
 void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
+void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot);
 int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
 			    struct net_device *bridge);
 int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
...
@@ -693,5 +693,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
 			   struct netlink_ext_ack *extack);
 int ocelot_vcap_filter_del(struct ocelot *ocelot,
 			   struct ocelot_vcap_filter *rule);
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id,
+				    bool tc_offload);
 
 #endif /* _OCELOT_VCAP_H_ */