Commit b2492d50 authored by David S. Miller

Merge branch 'dpaa2-switch-add-mirroring-support'

Ioana Ciornei says:

====================
dpaa2-switch: add mirroring support

This patch set adds per port and per VLAN mirroring support in dpaa2-switch.

The first 4 patches are just cosmetic changes. We renamed the
dpaa2_switch_acl_tbl structure to dpaa2_switch_filter_block so that we
can reuse it for filters that do not use the ACL table, and we
reorganized the addition of trap, redirect and drop filters into a
separate function. All this makes for a more streamlined addition of
the mirroring support.

The next 4 patches add the advertised support itself. Mirroring rules
can be added in shared blocks; the driver will replicate the same
configuration on all the switch ports that are part of the same block.

The last patch documents the feature, presents its behavior and
limitations, and gives a couple of examples.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 88ea96f8 d1626a1c
@@ -172,3 +172,46 @@ Example 4: Use a single shared filter block on both eth5 and eth6::
        action trap
    $ tc filter add block 1 ingress protocol ipv4 flower src_ip 192.168.1.1 skip_sw \
        action mirred egress redirect dev eth3

Mirroring
~~~~~~~~~

The DPAA2 switch supports only per port mirroring and per VLAN mirroring.
Adding mirroring filters in shared blocks is also supported.
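
For example (a sketch, assuming eth5 and eth6 share ingress block 1 as in
Example 4 above, with eth3 as the mirror port), a single rule is enough to
mirror the ingress traffic of both ports::

    $ tc filter add block 1 ingress matchall skip_sw action mirred egress mirror dev eth3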

When using the tc-flower classifier with the 802.1q protocol, only the
``vlan_id`` key will be accepted. Mirroring based on any other fields from the
802.1q protocol will be rejected::

    $ tc qdisc add dev eth8 ingress_block 1 clsact
    $ tc filter add block 1 ingress protocol 802.1q flower skip_sw vlan_prio 3 action mirred egress mirror dev eth6
    Error: fsl_dpaa2_switch: Only matching on VLAN ID supported.
    We have an error talking to the kernel

If a mirroring VLAN filter is requested on a port, the VLAN must be installed
on the switch port in question either using ``bridge`` or by creating a VLAN
upper device if the switch port is used as a standalone interface::

    $ tc qdisc add dev eth8 ingress_block 1 clsact
    $ tc filter add block 1 ingress protocol 802.1q flower skip_sw vlan_id 200 action mirred egress mirror dev eth6
    Error: VLAN must be installed on the switch port.
    We have an error talking to the kernel

    $ bridge vlan add vid 200 dev eth8
    $ tc filter add block 1 ingress protocol 802.1q flower skip_sw vlan_id 200 action mirred egress mirror dev eth6

    $ ip link add link eth8 name eth8.200 type vlan id 200
    $ tc filter add block 1 ingress protocol 802.1q flower skip_sw vlan_id 200 action mirred egress mirror dev eth6

Also, it should be noted that the mirrored traffic will be subject to the same
egress restrictions as any other traffic. This means that when a mirrored
packet reaches the mirror port, it will be dropped if the VLAN found in the
packet is not installed on that port.
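
For example (a sketch, assuming eth6 is the mirror port for the VLAN rules
above), the VLAN can be installed on the mirror port beforehand::

    $ bridge vlan add vid 200 dev eth6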

The DPAA2 switch supports only a single mirroring destination, thus multiple
mirror rules can be installed but their ``to`` port has to be the same::

    $ tc filter add block 1 ingress protocol 802.1q flower skip_sw vlan_id 200 action mirred egress mirror dev eth6
    $ tc filter add block 1 ingress protocol 802.1q flower skip_sw vlan_id 100 action mirred egress mirror dev eth7
    Error: fsl_dpaa2_switch: Multiple mirror ports not supported.
    We have an error talking to the kernel
@@ -111,11 +111,11 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
return 0;
}
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
struct ethsw_core *ethsw = acl_tbl->ethsw;
struct ethsw_core *ethsw = filter_block->ethsw;
struct dpsw_acl_key *acl_key = &entry->key;
struct device *dev = ethsw->dev;
u8 *cmd_buff;
@@ -136,7 +136,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
}
err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
acl_tbl->id, acl_entry_cfg);
filter_block->acl_id, acl_entry_cfg);
dma_unmap_single(dev, acl_entry_cfg->key_iova,
		 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
@@ -150,12 +150,13 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
return 0;
}
static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
struct dpaa2_switch_acl_entry *entry)
static int
dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg;
struct dpsw_acl_key *acl_key = &entry->key;
struct ethsw_core *ethsw = acl_tbl->ethsw;
struct ethsw_core *ethsw = block->ethsw;
struct device *dev = ethsw->dev;
u8 *cmd_buff;
int err;
@@ -175,7 +176,7 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
}
err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle,
acl_tbl->id, acl_entry_cfg);
block->acl_id, acl_entry_cfg);
dma_unmap_single(dev, acl_entry_cfg->key_iova,
		 DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE);
@@ -190,19 +191,19 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
struct list_head *pos, *n;
int index = 0;
if (list_empty(&acl_tbl->entries)) {
list_add(&entry->list, &acl_tbl->entries);
if (list_empty(&block->acl_entries)) {
list_add(&entry->list, &block->acl_entries);
return index;
}
list_for_each_safe(pos, n, &acl_tbl->entries) {
list_for_each_safe(pos, n, &block->acl_entries) {
tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list);
if (entry->prio < tmp->prio)
break;
@@ -213,13 +214,13 @@ dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static struct dpaa2_switch_acl_entry*
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block,
int index)
{
struct dpaa2_switch_acl_entry *tmp;
int i = 0;
list_for_each_entry(tmp, &acl_tbl->entries, list) {
list_for_each_entry(tmp, &block->acl_entries, list) {
if (i == index)
return tmp;
++i;
@@ -229,37 +230,38 @@ dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl,
dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry,
int precedence)
{
int err;
err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
err = dpaa2_switch_acl_entry_remove(block, entry);
if (err)
return err;
entry->cfg.precedence = precedence;
return dpaa2_switch_acl_entry_add(acl_tbl, entry);
return dpaa2_switch_acl_entry_add(block, entry);
}
static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
struct dpaa2_switch_acl_entry *entry)
static int
dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
int index, i, precedence, err;
/* Add the new ACL entry to the linked list and get its index */
index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry);
index = dpaa2_switch_acl_entry_add_to_list(block, entry);
/* Move up in priority the ACL entries to make space
* for the new filter.
*/
precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1;
precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1;
for (i = 0; i < index; i++) {
tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
precedence);
if (err)
return err;
@@ -269,19 +271,19 @@ static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
/* Add the new entry to hardware */
entry->cfg.precedence = precedence;
err = dpaa2_switch_acl_entry_add(acl_tbl, entry);
acl_tbl->num_rules++;
err = dpaa2_switch_acl_entry_add(block, entry);
block->num_acl_rules++;
return err;
}
static struct dpaa2_switch_acl_entry *
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
unsigned long cookie)
{
struct dpaa2_switch_acl_entry *tmp, *n;
list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
if (tmp->cookie == cookie)
return tmp;
}
@@ -289,13 +291,13 @@ dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl,
}
static int
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp, *n;
int index = 0;
list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) {
list_for_each_entry_safe(tmp, n, &block->acl_entries, list) {
if (tmp->cookie == entry->cookie)
return index;
index++;
@@ -303,21 +305,34 @@ dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl,
return -ENOENT;
}
static struct dpaa2_switch_mirror_entry *
dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block,
unsigned long cookie)
{
struct dpaa2_switch_mirror_entry *tmp, *n;
list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) {
if (tmp->cookie == cookie)
return tmp;
}
return NULL;
}
static int
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry)
{
struct dpaa2_switch_acl_entry *tmp;
int index, i, precedence, err;
index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry);
index = dpaa2_switch_acl_entry_get_index(block, entry);
/* Remove from hardware the ACL entry */
err = dpaa2_switch_acl_entry_remove(acl_tbl, entry);
err = dpaa2_switch_acl_entry_remove(block, entry);
if (err)
return err;
acl_tbl->num_rules--;
block->num_acl_rules--;
/* Remove it from the list also */
list_del(&entry->list);
@@ -325,8 +340,8 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
/* Move down in priority the entries over the deleted one */
precedence = entry->cfg.precedence;
for (i = index - 1; i >= 0; i--) {
tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i);
err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp,
tmp = dpaa2_switch_acl_entry_get_by_index(block, i);
err = dpaa2_switch_acl_entry_set_precedence(block, tmp,
precedence);
if (err)
return err;
@@ -339,10 +354,10 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl,
return 0;
}
static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw,
struct flow_action_entry *cls_act,
struct dpsw_acl_result *dpsw_act,
struct netlink_ext_ack *extack)
static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw,
struct flow_action_entry *cls_act,
struct dpsw_acl_result *dpsw_act,
struct netlink_ext_ack *extack)
{
int err = 0;
@@ -374,22 +389,110 @@ static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw,
return err;
}
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
static int
dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_mirror_entry *entry,
u16 to, struct netlink_ext_ack *extack)
{
unsigned long block_ports = block->ports;
struct ethsw_core *ethsw = block->ethsw;
struct ethsw_port_priv *port_priv;
unsigned long ports_added = 0;
u16 vlan = entry->cfg.vlan_id;
bool mirror_port_enabled;
int err, port;
/* Set up the mirroring port. By convention, a mirror_port value equal to
 * the number of switch interfaces means that no mirror port has been
 * configured yet.
 */
mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
if (!mirror_port_enabled) {
err = dpsw_set_reflection_if(ethsw->mc_io, 0,
ethsw->dpsw_handle, to);
if (err)
return err;
ethsw->mirror_port = to;
}
/* Setup the same egress mirroring configuration on all the switch
* ports that share the same filter block.
*/
for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) {
port_priv = ethsw->ports[port];
/* We cannot add a per VLAN mirroring rule if the VLAN in
* question is not installed on the switch port.
*/
if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
!(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) {
NL_SET_ERR_MSG(extack,
"VLAN must be installed on the switch port");
err = -EINVAL;
goto err_remove_filters;
}
err = dpsw_if_add_reflection(ethsw->mc_io, 0,
ethsw->dpsw_handle,
port, &entry->cfg);
if (err)
goto err_remove_filters;
ports_added |= BIT(port);
}
list_add(&entry->list, &block->mirror_entries);
return 0;
err_remove_filters:
for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) {
dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
port, &entry->cfg);
}
if (!mirror_port_enabled)
ethsw->mirror_port = ethsw->sw_attr.num_ifs;
return err;
}
static int
dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_mirror_entry *entry)
{
struct dpsw_reflection_cfg *cfg = &entry->cfg;
unsigned long block_ports = block->ports;
struct ethsw_core *ethsw = block->ethsw;
int port;
/* Remove this mirroring configuration from all the ports belonging to
* the filter block.
*/
for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs)
dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
port, cfg);
/* Also remove it from the list of mirror filters */
list_del(&entry->list);
kfree(entry);
/* If this was the last mirror filter, then unset the mirror port */
if (list_empty(&block->mirror_entries))
ethsw->mirror_port = ethsw->sw_attr.num_ifs;
return 0;
}
static int
dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct netlink_ext_ack *extack = cls->common.extack;
struct ethsw_core *ethsw = acl_tbl->ethsw;
struct dpaa2_switch_acl_entry *acl_entry;
struct ethsw_core *ethsw = block->ethsw;
struct flow_action_entry *act;
int err;
if (!flow_offload_has_one_action(&rule->action)) {
NL_SET_ERR_MSG(extack, "Only singular actions are supported");
return -EOPNOTSUPP;
}
if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
if (dpaa2_switch_acl_tbl_is_full(block)) {
NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
return -ENOMEM;
}
@@ -403,15 +506,15 @@ int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
goto free_acl_entry;
act = &rule->action.entries[0];
err = dpaa2_switch_tc_parse_action(ethsw, act,
&acl_entry->cfg.result, extack);
err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
&acl_entry->cfg.result, extack);
if (err)
goto free_acl_entry;
acl_entry->prio = cls->common.prio;
acl_entry->cookie = cls->cookie;
err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
if (err)
goto free_acl_entry;
@@ -423,33 +526,171 @@ int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
return err;
}
int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
struct flow_cls_offload *cls)
static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
u16 *vlan)
{
struct dpaa2_switch_acl_entry *entry;
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(extack,
"Mirroring is supported only per VLAN");
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
if (!entry)
return 0;
if (match.mask->vlan_priority != 0 ||
match.mask->vlan_dei != 0) {
NL_SET_ERR_MSG_MOD(extack,
"Only matching on VLAN ID supported");
return -EOPNOTSUPP;
}
return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
if (match.mask->vlan_id != 0xFFF) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching not supported");
return -EOPNOTSUPP;
}
*vlan = (u16)match.key->vlan_id;
}
return 0;
}
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
struct tc_cls_matchall_offload *cls)
static int
dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct ethsw_core *ethsw = acl_tbl->ethsw;
struct dpaa2_switch_acl_entry *acl_entry;
struct flow_action_entry *act;
struct dpaa2_switch_mirror_entry *mirror_entry;
struct ethsw_core *ethsw = block->ethsw;
struct dpaa2_switch_mirror_entry *tmp;
struct flow_action_entry *cls_act;
struct list_head *pos, *n;
bool mirror_port_enabled;
u16 if_id, vlan;
int err;
if (!flow_offload_has_one_action(&cls->rule->action)) {
mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
cls_act = &cls->rule->action.entries[0];
/* Offload rules only when the destination is a DPAA2 switch port */
if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Destination not a DPAA2 switch port");
return -EOPNOTSUPP;
}
if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
/* We have a single mirror port but can configure egress mirroring on
* all the other switch ports. We need to allow mirroring rules only
* when the destination port is the same.
*/
if (mirror_port_enabled && ethsw->mirror_port != if_id) {
NL_SET_ERR_MSG_MOD(extack,
"Multiple mirror ports not supported");
return -EBUSY;
}
/* Parse the key */
err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan);
if (err)
return err;
/* Make sure that we don't already have a mirror rule with the same
* configuration.
*/
list_for_each_safe(pos, n, &block->mirror_entries) {
tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN &&
tmp->cfg.vlan_id == vlan) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN mirror filter already installed");
return -EBUSY;
}
}
mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
if (!mirror_entry)
return -ENOMEM;
mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN;
mirror_entry->cfg.vlan_id = vlan;
mirror_entry->cookie = cls->cookie;
return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
extack);
}
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action_entry *act;
if (!flow_offload_has_one_action(&rule->action)) {
NL_SET_ERR_MSG(extack, "Only singular actions are supported");
return -EOPNOTSUPP;
}
if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) {
act = &rule->action.entries[0];
switch (act->id) {
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_TRAP:
case FLOW_ACTION_DROP:
return dpaa2_switch_cls_flower_replace_acl(block, cls);
case FLOW_ACTION_MIRRED:
return dpaa2_switch_cls_flower_replace_mirror(block, cls);
default:
NL_SET_ERR_MSG_MOD(extack, "Action not supported");
return -EOPNOTSUPP;
}
}
int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls)
{
struct dpaa2_switch_mirror_entry *mirror_entry;
struct dpaa2_switch_acl_entry *acl_entry;
/* If this filter is an ACL one, remove it */
acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
cls->cookie);
if (acl_entry)
return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry);
/* If not, then it has to be a mirror */
mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
cls->cookie);
if (mirror_entry)
return dpaa2_switch_block_remove_mirror(block,
mirror_entry);
return 0;
}
static int
dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct ethsw_core *ethsw = block->ethsw;
struct dpaa2_switch_acl_entry *acl_entry;
struct flow_action_entry *act;
int err;
if (dpaa2_switch_acl_tbl_is_full(block)) {
NL_SET_ERR_MSG(extack, "Maximum filter capacity reached");
return -ENOMEM;
}
@@ -459,15 +700,15 @@ int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
return -ENOMEM;
act = &cls->rule->action.entries[0];
err = dpaa2_switch_tc_parse_action(ethsw, act,
&acl_entry->cfg.result, extack);
err = dpaa2_switch_tc_parse_action_acl(ethsw, act,
&acl_entry->cfg.result, extack);
if (err)
goto free_acl_entry;
acl_entry->prio = cls->common.prio;
acl_entry->cookie = cls->cookie;
err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry);
err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry);
if (err)
goto free_acl_entry;
@@ -479,14 +720,159 @@ int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
return err;
}
int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
static int
dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct dpaa2_switch_mirror_entry *mirror_entry;
struct ethsw_core *ethsw = block->ethsw;
struct dpaa2_switch_mirror_entry *tmp;
struct flow_action_entry *cls_act;
struct list_head *pos, *n;
bool mirror_port_enabled;
u16 if_id;
mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs);
cls_act = &cls->rule->action.entries[0];
/* Offload rules only when the destination is a DPAA2 switch port */
if (!dpaa2_switch_port_dev_check(cls_act->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Destination not a DPAA2 switch port");
return -EOPNOTSUPP;
}
if_id = dpaa2_switch_get_index(ethsw, cls_act->dev);
/* We have a single mirror port but can configure egress mirroring on
* all the other switch ports. We need to allow mirroring rules only
* when the destination port is the same.
*/
if (mirror_port_enabled && ethsw->mirror_port != if_id) {
NL_SET_ERR_MSG_MOD(extack,
"Multiple mirror ports not supported");
return -EBUSY;
}
/* Make sure that we don't already have a mirror rule with the same
* configuration. One matchall rule per block is the maximum.
*/
list_for_each_safe(pos, n, &block->mirror_entries) {
tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list);
if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) {
NL_SET_ERR_MSG_MOD(extack,
"Matchall mirror filter already installed");
return -EBUSY;
}
}
mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL);
if (!mirror_entry)
return -ENOMEM;
mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL;
mirror_entry->cookie = cls->cookie;
return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id,
extack);
}
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action_entry *act;
if (!flow_offload_has_one_action(&cls->rule->action)) {
NL_SET_ERR_MSG(extack, "Only singular actions are supported");
return -EOPNOTSUPP;
}
act = &cls->rule->action.entries[0];
switch (act->id) {
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_TRAP:
case FLOW_ACTION_DROP:
return dpaa2_switch_cls_matchall_replace_acl(block, cls);
case FLOW_ACTION_MIRRED:
return dpaa2_switch_cls_matchall_replace_mirror(block, cls);
default:
NL_SET_ERR_MSG_MOD(extack, "Action not supported");
return -EOPNOTSUPP;
}
}
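/* Replay all the mirror entries of a filter block on the given port. This is
 * called when a port joins a filter block so that the port picks up the
 * block's existing mirroring configuration; it unwinds on the first error.
 */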
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
struct ethsw_port_priv *port_priv)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpaa2_switch_mirror_entry *tmp;
int err;
list_for_each_entry(tmp, &block->mirror_entries, list) {
err = dpsw_if_add_reflection(ethsw->mc_io, 0,
ethsw->dpsw_handle,
port_priv->idx, &tmp->cfg);
if (err)
goto unwind_add;
}
return 0;
unwind_add:
list_for_each_entry(tmp, &block->mirror_entries, list)
dpsw_if_remove_reflection(ethsw->mc_io, 0,
ethsw->dpsw_handle,
port_priv->idx, &tmp->cfg);
return err;
}
int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
struct ethsw_port_priv *port_priv)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpaa2_switch_mirror_entry *tmp;
int err;
list_for_each_entry(tmp, &block->mirror_entries, list) {
err = dpsw_if_remove_reflection(ethsw->mc_io, 0,
ethsw->dpsw_handle,
port_priv->idx, &tmp->cfg);
if (err)
goto unwind_remove;
}
return 0;
unwind_remove:
list_for_each_entry(tmp, &block->mirror_entries, list)
dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle,
port_priv->idx, &tmp->cfg);
return err;
}
int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls)
{
struct dpaa2_switch_acl_entry *entry;
struct dpaa2_switch_mirror_entry *mirror_entry;
struct dpaa2_switch_acl_entry *acl_entry;
/* If this filter is an ACL one, remove it */
acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block,
cls->cookie);
if (acl_entry)
return dpaa2_switch_acl_tbl_remove_entry(block,
acl_entry);
entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie);
if (!entry)
return 0;
/* If not, then it has to be a mirror */
mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block,
cls->cookie);
if (mirror_entry)
return dpaa2_switch_block_remove_mirror(block,
mirror_entry);
return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry);
return 0;
}
@@ -41,14 +41,14 @@ static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *e
return NULL;
}
static struct dpaa2_switch_acl_tbl *
dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw)
static struct dpaa2_switch_filter_block *
dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
int i;
for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
if (!ethsw->acls[i].in_use)
return &ethsw->acls[i];
if (!ethsw->filter_blocks[i].in_use)
return &ethsw->filter_blocks[i];
return NULL;
}
@@ -1127,28 +1127,28 @@ static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
}
static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl,
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
struct flow_cls_offload *f)
{
switch (f->command) {
case FLOW_CLS_REPLACE:
return dpaa2_switch_cls_flower_replace(acl_tbl, f);
return dpaa2_switch_cls_flower_replace(filter_block, f);
case FLOW_CLS_DESTROY:
return dpaa2_switch_cls_flower_destroy(acl_tbl, f);
return dpaa2_switch_cls_flower_destroy(filter_block, f);
default:
return -EOPNOTSUPP;
}
}
static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl,
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *f)
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return dpaa2_switch_cls_matchall_replace(acl_tbl, f);
return dpaa2_switch_cls_matchall_replace(block, f);
case TC_CLSMATCHALL_DESTROY:
return dpaa2_switch_cls_matchall_destroy(acl_tbl, f);
return dpaa2_switch_cls_matchall_destroy(block, f);
default:
return -EOPNOTSUPP;
}
@@ -1170,106 +1170,122 @@ static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
static LIST_HEAD(dpaa2_switch_block_cb_list);
static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
struct dpaa2_switch_acl_tbl *acl_tbl)
static int
dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
struct dpsw_acl_if_cfg acl_if_cfg;
int err;
if (port_priv->acl_tbl)
if (port_priv->filter_block)
return -EINVAL;
acl_if_cfg.if_id[0] = port_priv->idx;
acl_if_cfg.num_ifs = 1;
err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
acl_tbl->id, &acl_if_cfg);
block->acl_id, &acl_if_cfg);
if (err) {
netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
return err;
}
acl_tbl->ports |= BIT(port_priv->idx);
port_priv->acl_tbl = acl_tbl;
block->ports |= BIT(port_priv->idx);
port_priv->filter_block = block;
return 0;
}
static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
struct dpaa2_switch_acl_tbl *acl_tbl)
struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct net_device *netdev = port_priv->netdev;
struct dpsw_acl_if_cfg acl_if_cfg;
int err;
if (port_priv->acl_tbl != acl_tbl)
if (port_priv->filter_block != block)
return -EINVAL;
acl_if_cfg.if_id[0] = port_priv->idx;
acl_if_cfg.num_ifs = 1;
err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
acl_tbl->id, &acl_if_cfg);
block->acl_id, &acl_if_cfg);
if (err) {
netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
return err;
}
acl_tbl->ports &= ~BIT(port_priv->idx);
port_priv->acl_tbl = NULL;
block->ports &= ~BIT(port_priv->idx);
port_priv->filter_block = NULL;
return 0;
}
static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
struct dpaa2_switch_acl_tbl *acl_tbl)
struct dpaa2_switch_filter_block *block)
{
struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl;
struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
int err;
/* Offload all the mirror entries found in the block on this new port
* joining it.
*/
err = dpaa2_switch_block_offload_mirror(block, port_priv);
if (err)
return err;
/* If the port is already bound to this ACL table then do nothing. This
* can happen when this port is the first one to join a tc block
*/
if (port_priv->acl_tbl == acl_tbl)
if (port_priv->filter_block == block)
return 0;
err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl);
err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
if (err)
return err;
/* Mark the previous ACL table as being unused if this was the last
* port that was using it.
*/
if (old_acl_tbl->ports == 0)
old_acl_tbl->in_use = false;
if (old_block->ports == 0)
old_block->in_use = false;
return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
}
static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
struct dpaa2_switch_acl_tbl *acl_tbl)
static int
dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
struct dpaa2_switch_filter_block *block)
{
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpaa2_switch_acl_tbl *new_acl_tbl;
struct dpaa2_switch_filter_block *new_block;
int err;
/* Unoffload all the mirror entries found in the block from the
* port leaving it.
*/
err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
if (err)
return err;
/* We are the last port that leaves a block (an ACL table).
* We'll continue to use this table.
*/
if (acl_tbl->ports == BIT(port_priv->idx))
if (block->ports == BIT(port_priv->idx))
return 0;
err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl);
err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
if (err)
return err;
if (acl_tbl->ports == 0)
acl_tbl->in_use = false;
if (block->ports == 0)
block->in_use = false;
new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
new_acl_tbl->in_use = true;
return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl);
new_block = dpaa2_switch_filter_block_get_unused(ethsw);
new_block->in_use = true;
return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
}
static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
@@ -1277,7 +1293,7 @@
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpaa2_switch_acl_tbl *acl_tbl;
struct dpaa2_switch_filter_block *filter_block;
struct flow_block_cb *block_cb;
bool register_block = false;
int err;
@@ -1287,24 +1303,24 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
ethsw);
if (!block_cb) {
/* If the ACL table is not already known, then this port must
* be the first to join it. In this case, we can just continue
* to use our private table
/* If the filter block is not already known, then this port
* must be the first to join it. In this case, we can just
* continue to use our private table
*/
acl_tbl = port_priv->acl_tbl;
filter_block = port_priv->filter_block;
block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
ethsw, acl_tbl, NULL);
ethsw, filter_block, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
register_block = true;
} else {
acl_tbl = flow_block_cb_priv(block_cb);
filter_block = flow_block_cb_priv(block_cb);
}
flow_block_cb_incref(block_cb);
err = dpaa2_switch_port_block_bind(port_priv, acl_tbl);
err = dpaa2_switch_port_block_bind(port_priv, filter_block);
if (err)
goto err_block_bind;
@@ -1327,7 +1343,7 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpaa2_switch_acl_tbl *acl_tbl;
struct dpaa2_switch_filter_block *filter_block;
struct flow_block_cb *block_cb;
int err;
@@ -1337,8 +1353,8 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
if (!block_cb)
return;
acl_tbl = flow_block_cb_priv(block_cb);
err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl);
filter_block = flow_block_cb_priv(block_cb);
err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
if (!err && !flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
@@ -2991,7 +3007,7 @@ static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
acl_entry.cfg.precedence = 0;
acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry);
return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
}
static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
@@ -3004,7 +3020,7 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
};
struct net_device *netdev = port_priv->netdev;
struct ethsw_core *ethsw = port_priv->ethsw_data;
struct dpaa2_switch_acl_tbl *acl_tbl;
struct dpaa2_switch_filter_block *filter_block;
struct dpsw_fdb_cfg fdb_cfg = {0};
struct dpsw_if_attr dpsw_if_attr;
struct dpaa2_switch_fdb *fdb;
@@ -3059,14 +3075,15 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
return err;
}
acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw);
acl_tbl->ethsw = ethsw;
acl_tbl->id = acl_tbl_id;
acl_tbl->in_use = true;
acl_tbl->num_rules = 0;
INIT_LIST_HEAD(&acl_tbl->entries);
filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
filter_block->ethsw = ethsw;
filter_block->acl_id = acl_tbl_id;
filter_block->in_use = true;
filter_block->num_acl_rules = 0;
INIT_LIST_HEAD(&filter_block->acl_entries);
INIT_LIST_HEAD(&filter_block->mirror_entries);
err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl);
err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
if (err)
return err;
@@ -3120,7 +3137,7 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
}
kfree(ethsw->fdbs);
kfree(ethsw->acls);
kfree(ethsw->filter_blocks);
kfree(ethsw->ports);
dpaa2_switch_takedown(sw_dev);
@@ -3248,9 +3265,10 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
goto err_free_ports;
}
ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls),
GFP_KERNEL);
if (!ethsw->acls) {
ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
sizeof(*ethsw->filter_blocks),
GFP_KERNEL);
if (!ethsw->filter_blocks) {
err = -ENOMEM;
goto err_free_fdbs;
}
@@ -3281,6 +3299,11 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
if (err)
goto err_stop;
/* By convention, if the mirror port is equal to the number of switch
* interfaces, then mirroring of any kind is disabled.
*/
ethsw->mirror_port = ethsw->sw_attr.num_ifs;
/* Register the netdev only when the entire setup is done and the
* switch port interfaces are ready to receive traffic
*/
@@ -3303,7 +3326,7 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
err_free_netdev:
for (i--; i >= 0; i--)
free_netdev(ethsw->ports[i]->netdev);
kfree(ethsw->acls);
kfree(ethsw->filter_blocks);
err_free_fdbs:
kfree(ethsw->fdbs);
err_free_ports:
@@ -113,20 +113,29 @@ struct dpaa2_switch_acl_entry {
struct dpsw_acl_key key;
};
struct dpaa2_switch_acl_tbl {
struct list_head entries;
struct dpaa2_switch_mirror_entry {
struct list_head list;
struct dpsw_reflection_cfg cfg;
unsigned long cookie;
u16 if_id;
};
struct dpaa2_switch_filter_block {
struct ethsw_core *ethsw;
u64 ports;
u16 id;
u8 num_rules;
bool in_use;
struct list_head acl_entries;
u16 acl_id;
u8 num_acl_rules;
struct list_head mirror_entries;
};
static inline bool
dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl)
dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block)
{
if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
return true;
return false;
@@ -149,7 +158,7 @@ struct ethsw_port_priv {
bool ucast_flood;
bool learn_ena;
struct dpaa2_switch_acl_tbl *acl_tbl;
struct dpaa2_switch_filter_block *filter_block;
};
/* Switch data */
@@ -175,7 +184,8 @@ struct ethsw_core {
int napi_users;
struct dpaa2_switch_fdb *fdbs;
struct dpaa2_switch_acl_tbl *acls;
struct dpaa2_switch_filter_block *filter_blocks;
u16 mirror_port;
};
static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
@@ -229,18 +239,24 @@ typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
/* TC offload */
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls);
int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
struct flow_cls_offload *cls);
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl,
int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls);
int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl,
int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
struct tc_cls_matchall_offload *cls);
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl,
int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block,
struct dpaa2_switch_acl_entry *entry);
int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
struct ethsw_port_priv *port_priv);
int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
struct ethsw_port_priv *port_priv);
#endif /* __ETHSW_H */
@@ -39,11 +39,16 @@
#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022)
#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_V2(0x034)
#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037)
#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038)
#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
@@ -533,5 +538,19 @@ struct dpsw_cmd_acl_entry {
__le64 pad2[4];
__le64 key_iova;
};
struct dpsw_cmd_set_reflection_if {
__le16 if_id;
};
#define DPSW_FILTER_SHIFT 0
#define DPSW_FILTER_SIZE 2
struct dpsw_cmd_if_reflection {
__le16 if_id;
__le16 vlan_id;
/* only 2 bits from the LSB */
u8 filter;
};
#pragma pack(pop)
#endif /* __FSL_DPSW_CMD_H */
@@ -1579,3 +1579,83 @@ int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpsw_set_reflection_if() - Set the target interface for mirrored traffic
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPSW object
* @if_id: Interface Id
*
* Only one mirroring destination is allowed per switch
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 if_id)
{
struct dpsw_cmd_set_reflection_if *cmd_params;
struct fsl_mc_command cmd = { 0 };
cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF,
cmd_flags,
token);
cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params;
cmd_params->if_id = cpu_to_le16(if_id);
return mc_send_command(mc_io, &cmd);
}
/**
* dpsw_if_add_reflection() - Set up a mirroring rule
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPSW object
* @if_id: Interface Identifier
* @cfg: Reflection configuration
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 if_id, const struct dpsw_reflection_cfg *cfg)
{
struct dpsw_cmd_if_reflection *cmd_params;
struct fsl_mc_command cmd = { 0 };
cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION,
cmd_flags,
token);
cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
cmd_params->if_id = cpu_to_le16(if_id);
cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
return mc_send_command(mc_io, &cmd);
}
/**
* dpsw_if_remove_reflection() - Remove a mirroring rule
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPSW object
* @if_id: Interface Identifier
* @cfg: Reflection configuration
*
* Return: Completion status. '0' on Success; Error code otherwise.
*/
int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 if_id, const struct dpsw_reflection_cfg *cfg)
{
struct dpsw_cmd_if_reflection *cmd_params;
struct fsl_mc_command cmd = { 0 };
cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION,
cmd_flags,
token);
cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params;
cmd_params->if_id = cpu_to_le16(if_id);
cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
dpsw_set_field(cmd_params->filter, FILTER, cfg->filter);
return mc_send_command(mc_io, &cmd);
}
@@ -752,4 +752,35 @@ int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
/**
* enum dpsw_reflection_filter - Filter type for frames to be reflected
* @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames
* @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to
* the particular VLAN defined by the vid parameter
*
*/
enum dpsw_reflection_filter {
DPSW_REFLECTION_FILTER_INGRESS_ALL = 0,
DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1
};
/**
* struct dpsw_reflection_cfg - Structure representing the mirroring config
* @filter: Filter type for frames to be mirrored
* @vlan_id: VLAN ID to mirror; valid only when the filter type is
*	DPSW_REFLECTION_FILTER_INGRESS_VLAN
*/
struct dpsw_reflection_cfg {
enum dpsw_reflection_filter filter;
u16 vlan_id;
};
int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 if_id);
int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 if_id, const struct dpsw_reflection_cfg *cfg);
int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u16 if_id, const struct dpsw_reflection_cfg *cfg);
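/* Usage sketch (assuming an open MC portal in 'mc_io' and a DPSW token in
 * 'token'): mirror all ingress traffic of interface 2 to interface 0:
 *
 *	struct dpsw_reflection_cfg cfg = {
 *		.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL,
 *	};
 *	int err;
 *
 *	err = dpsw_set_reflection_if(mc_io, 0, token, 0);
 *	if (!err)
 *		err = dpsw_if_add_reflection(mc_io, 0, token, 2, &cfg);
 */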
#endif /* __FSL_DPSW_H */