Commit 15cef309 authored by David S. Miller

Merge branch 'aquantia-add-rx-flow-filter-support'

Igor Russkikh says:

====================
net: aquantia: add rx-flow filter support

This patchset implements the rx-flow filter functionality and VLAN filter
offloads.

The rules in NIC hardware have a fixed order and fixed priorities.
To support this, the filter locations exposed through ethtool are also fixed:

* Locations 0 - 15 for VLAN ID filters
* Locations 16 - 31 for L2 EtherType and PCP filters
* Locations 32 - 39 for L3/L4 5-tuple filters (locations 32, 36 for IPv6)
====================
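For illustration, rules at these fixed locations could be installed with
ethtool along the following lines (the interface name, queue numbers and
match values are hypothetical examples, not part of the patchset):

  ethtool -K eth0 ntuple on
  ethtool -N eth0 flow-type ip4 vlan 10 m 0xF000 action 1 loc 0
  ethtool -N eth0 flow-type ether proto 0x88F7 action 1 loc 16
  ethtool -N eth0 flow-type udp4 dst-port 5001 action 2 loc 32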
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4fd3e2ac 7975d2af
......@@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
aq_filters.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
......
......@@ -14,7 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "ver.h"
#include "aq_cfg.h"
#include "aq_utils.h"
......
......@@ -12,6 +12,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_filters.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
......@@ -213,7 +214,36 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
break;
case ETHTOOL_GRXCLSRULE:
err = aq_get_rxnfc_rule(aq_nic, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static int aq_ethtool_set_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd)
{
int err = 0;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
err = aq_add_rxnfc_rule(aq_nic, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
err = aq_del_rxnfc_rule(aq_nic, cmd);
break;
default:
err = -EOPNOTSUPP;
break;
......@@ -520,6 +550,7 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
.set_rxnfc = aq_ethtool_set_rxnfc,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
......
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2014-2017 aQuantia Corporation. */
/* File aq_filters.c: RX filters related functions. */
#include "aq_filters.h"
static bool __must_check
aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
{
if (fsp->flow_type & FLOW_MAC_EXT)
return false;
switch (fsp->flow_type & ~FLOW_EXT) {
case ETHER_FLOW:
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
case IPV4_FLOW:
case IPV6_FLOW:
return true;
case IP_USER_FLOW:
switch (fsp->h_u.usr_ip4_spec.proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_SCTP:
case IPPROTO_IP:
return true;
default:
return false;
}
case IPV6_USER_FLOW:
switch (fsp->h_u.usr_ip6_spec.l4_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_SCTP:
case IPPROTO_IP:
return true;
default:
return false;
}
default:
return false;
}
return false;
}
static bool __must_check
aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
struct ethtool_rx_flow_spec *fsp2)
{
if (fsp1->flow_type != fsp2->flow_type ||
memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
return false;
return true;
}
static bool __must_check
aq_rule_already_exists(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
struct aq_rx_filter *rule;
struct hlist_node *aq_node2;
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
hlist_for_each_entry_safe(rule, aq_node2,
&rx_fltrs->filter_list, aq_node) {
if (rule->aq_fsp.location == fsp->location)
continue;
if (aq_match_filter(&rule->aq_fsp, fsp)) {
netdev_err(aq_nic->ndev,
"ethtool: This filter is already set\n");
return true;
}
}
return false;
}
static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
fsp->location > AQ_RX_LAST_LOC_FL3L4) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
AQ_RX_FIRST_LOC_FL3L4,
AQ_RX_LAST_LOC_FL3L4);
return -EINVAL;
}
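/* The driver keeps all eight L3/L4 slots in one address family at a
 * time; mixing IPv4 and IPv6 rules is rejected below.
 */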
if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
rx_fltrs->fl3l4.is_ipv6 = false;
netdev_err(aq_nic->ndev,
"ethtool: mixing ipv4 and ipv6 is not allowed");
return -EINVAL;
} else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
rx_fltrs->fl3l4.is_ipv6 = true;
netdev_err(aq_nic->ndev,
"ethtool: mixing ipv4 and ipv6 is not allowed");
return -EINVAL;
} else if (rx_fltrs->fl3l4.is_ipv6 &&
fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
netdev_err(aq_nic->ndev,
"ethtool: The specified location for ipv6 must be %d or %d",
AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
return -EINVAL;
}
return 0;
}
static int __must_check
aq_check_approve_fl2(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
fsp->location > AQ_RX_LAST_LOC_FETHERT) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
AQ_RX_FIRST_LOC_FETHERT,
AQ_RX_LAST_LOC_FETHERT);
return -EINVAL;
}
if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
fsp->m_u.ether_spec.h_proto == 0U) {
netdev_err(aq_nic->ndev,
"ethtool: proto (ether_type) parameter must be specfied");
return -EINVAL;
}
return 0;
}
static int __must_check
aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
fsp->location > AQ_RX_LAST_LOC_FVLANID) {
netdev_err(aq_nic->ndev,
"ethtool: location must be in range [%d, %d]",
AQ_RX_FIRST_LOC_FVLANID,
AQ_RX_LAST_LOC_FVLANID);
return -EINVAL;
}
if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
(!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
aq_nic->active_vlans))) {
netdev_err(aq_nic->ndev,
"ethtool: unknown vlan-id specified");
return -EINVAL;
}
if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
netdev_err(aq_nic->ndev,
"ethtool: queue number must be in range [0, %d]",
aq_nic->aq_nic_cfg.num_rss_queues - 1);
return -EINVAL;
}
return 0;
}
static int __must_check
aq_check_filter(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
int err = 0;
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
if (fsp->flow_type & FLOW_EXT) {
if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
} else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
} else {
netdev_err(aq_nic->ndev,
"ethtool: invalid vlan mask 0x%x specified",
be16_to_cpu(fsp->m_ext.vlan_tci));
err = -EINVAL;
}
} else {
switch (fsp->flow_type & ~FLOW_EXT) {
case ETHER_FLOW:
err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case IPV4_FLOW:
case IP_USER_FLOW:
rx_fltrs->fl3l4.is_ipv6 = false;
err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
case IPV6_FLOW:
case IPV6_USER_FLOW:
rx_fltrs->fl3l4.is_ipv6 = true;
err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
break;
default:
netdev_err(aq_nic->ndev,
"ethtool: unknown flow-type specified");
err = -EINVAL;
}
}
return err;
}
static bool __must_check
aq_rule_is_not_support(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
bool rule_is_not_support = false;
if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
netdev_err(aq_nic->ndev,
"ethtool: Please, to enable the RX flow control:\n"
"ethtool -K %s ntuple on\n", aq_nic->ndev->name);
rule_is_not_support = true;
} else if (!aq_rule_is_approve(fsp)) {
netdev_err(aq_nic->ndev,
"ethtool: The specified flow type is not supported\n");
rule_is_not_support = true;
} else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
(fsp->h_u.tcp_ip4_spec.tos ||
fsp->h_u.tcp_ip6_spec.tclass)) {
netdev_err(aq_nic->ndev,
"ethtool: The specified tos tclass are not supported\n");
rule_is_not_support = true;
} else if (fsp->flow_type & FLOW_MAC_EXT) {
netdev_err(aq_nic->ndev,
"ethtool: MAC_EXT is not supported");
rule_is_not_support = true;
}
return rule_is_not_support;
}
static bool __must_check
aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
bool rule_is_not_correct = false;
if (!aq_nic) {
rule_is_not_correct = true;
} else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
netdev_err(aq_nic->ndev,
"ethtool: The specified number %u rule is invalid\n",
fsp->location);
rule_is_not_correct = true;
} else if (aq_check_filter(aq_nic, fsp)) {
rule_is_not_correct = true;
} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
netdev_err(aq_nic->ndev,
"ethtool: The specified action is invalid.\n"
"Maximum allowable value action is %u.\n",
aq_nic->aq_nic_cfg.num_rss_queues - 1);
rule_is_not_correct = true;
}
}
return rule_is_not_correct;
}
static int __must_check
aq_check_rule(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
int err = 0;
if (aq_rule_is_not_correct(aq_nic, fsp))
err = -EINVAL;
else if (aq_rule_is_not_support(aq_nic, fsp))
err = -EOPNOTSUPP;
else if (aq_rule_already_exists(aq_nic, fsp))
err = -EEXIST;
return err;
}
static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr,
struct aq_rx_filter_l2 *data, bool add)
{
const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
memset(data, 0, sizeof(*data));
data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
data->queue = fsp->ring_cookie;
else
data->queue = -1;
data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
== VLAN_PRIO_MASK;
data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
& VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
static int aq_add_del_fether(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr, bool add)
{
struct aq_rx_filter_l2 data;
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
if (unlikely(!aq_hw_ops->hw_filter_l2_set))
return -EOPNOTSUPP;
if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
return -EOPNOTSUPP;
if (add)
return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
else
return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
}
static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
{
int i;
for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
if (aq_vlans[i].enable &&
aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
aq_vlans[i].vlan_id == vlan) {
return true;
}
}
return false;
}
/* Rebuild the array of VLAN filters so that filters with an assigned
 * queue take precedence over plain VLANs configured on the interface.
 */
static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
unsigned long *active_vlans,
struct aq_rx_filter_vlan *aq_vlans)
{
bool vlan_busy = false;
int vlan = -1;
int i;
for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
if (aq_vlans[i].enable &&
aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
continue;
do {
vlan = find_next_bit(active_vlans,
VLAN_N_VID,
vlan + 1);
if (vlan == VLAN_N_VID) {
aq_vlans[i].enable = 0U;
aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
aq_vlans[i].vlan_id = 0;
continue;
}
vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
if (!vlan_busy) {
aq_vlans[i].enable = 1U;
aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
aq_vlans[i].vlan_id = vlan;
}
} while (vlan_busy && vlan != VLAN_N_VID);
}
}
static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr,
struct aq_rx_filter_vlan *aq_vlans, bool add)
{
const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
int i;
memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
if (!add)
return 0;
/* disable any existing entry for this vlan so it is not duplicated */
for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
if (aq_vlans[i].vlan_id ==
(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
aq_vlans[i].enable = false;
}
}
aq_vlans[location].location = location;
aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
& VLAN_VID_MASK;
aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
aq_vlans[location].enable = 1U;
return 0;
}
int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
struct aq_rx_filter *rule = NULL;
struct hlist_node *aq_node2;
hlist_for_each_entry_safe(rule, aq_node2,
&rx_fltrs->filter_list, aq_node) {
if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
break;
}
if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
struct ethtool_rxnfc cmd;
cmd.fs.location = rule->aq_fsp.location;
return aq_del_rxnfc_rule(aq_nic, &cmd);
}
return -ENOENT;
}
static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr, bool add)
{
const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
aq_set_data_fvlan(aq_nic,
aq_rx_fltr,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
add);
return aq_filters_vlans_update(aq_nic);
}
static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr,
struct aq_rx_filter_l3l4 *data, bool add)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
memset(data, 0, sizeof(*data));
data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
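/* An IPv6 rule occupies four consecutive L3/L4 slots, so the
 * IPv6 activity bitmap is indexed by location / 4.
 */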
if (!add) {
if (!data->is_ipv6)
rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
else
rx_fltrs->fl3l4.active_ipv6 &=
~BIT((data->location) / 4);
return 0;
}
data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
switch (fsp->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
break;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
data->cmd |= HW_ATL_RX_UDP;
data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
data->cmd |= HW_ATL_RX_SCTP;
data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
break;
default:
break;
}
if (!data->is_ipv6) {
data->ip_src[0] =
ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
data->ip_dst[0] =
ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
} else {
int i;
rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
data->ip_dst[i] =
ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
data->ip_src[i] =
ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
}
data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
}
if (fsp->flow_type != IP_USER_FLOW &&
fsp->flow_type != IPV6_USER_FLOW) {
if (!data->is_ipv6) {
data->p_dst =
ntohs(fsp->h_u.tcp_ip4_spec.pdst);
data->p_src =
ntohs(fsp->h_u.tcp_ip4_spec.psrc);
} else {
data->p_dst =
ntohs(fsp->h_u.tcp_ip6_spec.pdst);
data->p_src =
ntohs(fsp->h_u.tcp_ip6_spec.psrc);
}
}
if (data->ip_src[0] && !data->is_ipv6)
data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
if (data->ip_dst[0] && !data->is_ipv6)
data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
if (data->p_dst)
data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
if (data->p_src)
data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
} else {
data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
}
return 0;
}
static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
const struct aq_hw_ops *aq_hw_ops,
struct aq_rx_filter_l3l4 *data)
{
if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
return -EOPNOTSUPP;
return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
}
static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr, bool add)
{
const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
struct aq_rx_filter_l3l4 data;
if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
return -EINVAL;
return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
}
static int aq_add_del_rule(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr, bool add)
{
int err = -EINVAL;
if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
== VLAN_VID_MASK) {
aq_rx_fltr->type = aq_rx_filter_vlan;
err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
} else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
== VLAN_PRIO_MASK) {
aq_rx_fltr->type = aq_rx_filter_ethertype;
err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
}
} else {
switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
case ETHER_FLOW:
aq_rx_fltr->type = aq_rx_filter_ethertype;
err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
case IP_USER_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
case IPV6_USER_FLOW:
aq_rx_fltr->type = aq_rx_filter_l3l4;
err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
break;
default:
err = -EINVAL;
break;
}
}
return err;
}
static int aq_update_table_filters(struct aq_nic_s *aq_nic,
struct aq_rx_filter *aq_rx_fltr, u16 index,
struct ethtool_rxnfc *cmd)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
struct aq_rx_filter *rule = NULL, *parent = NULL;
struct hlist_node *aq_node2;
int err = -EINVAL;
hlist_for_each_entry_safe(rule, aq_node2,
&rx_fltrs->filter_list, aq_node) {
if (rule->aq_fsp.location >= index)
break;
parent = rule;
}
if (rule && rule->aq_fsp.location == index) {
err = aq_add_del_rule(aq_nic, rule, false);
hlist_del(&rule->aq_node);
kfree(rule);
--rx_fltrs->active_filters;
}
if (unlikely(!aq_rx_fltr))
return err;
INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
if (parent)
hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
else
hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
++rx_fltrs->active_filters;
return 0;
}
u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
return rx_fltrs->active_filters;
}
struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
{
return &aq_nic->aq_hw_rx_fltrs;
}
int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct aq_rx_filter *aq_rx_fltr;
int err = 0;
err = aq_check_rule(aq_nic, fsp);
if (err)
goto err_exit;
aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
if (unlikely(!aq_rx_fltr)) {
err = -ENOMEM;
goto err_exit;
}
memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
if (unlikely(err))
goto err_free;
err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
if (unlikely(err)) {
hlist_del(&aq_rx_fltr->aq_node);
--rx_fltrs->active_filters;
goto err_free;
}
return 0;
err_free:
kfree(aq_rx_fltr);
err_exit:
return err;
}
int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
struct aq_rx_filter *rule = NULL;
struct hlist_node *aq_node2;
int err = -EINVAL;
hlist_for_each_entry_safe(rule, aq_node2,
&rx_fltrs->filter_list, aq_node) {
if (rule->aq_fsp.location == cmd->fs.location)
break;
}
if (rule && rule->aq_fsp.location == cmd->fs.location) {
err = aq_add_del_rule(aq_nic, rule, false);
hlist_del(&rule->aq_node);
kfree(rule);
--rx_fltrs->active_filters;
}
return err;
}
int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct aq_rx_filter *rule = NULL;
struct hlist_node *aq_node2;
hlist_for_each_entry_safe(rule, aq_node2,
&rx_fltrs->filter_list, aq_node)
if (fsp->location <= rule->aq_fsp.location)
break;
if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
return -EINVAL;
memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
return 0;
}
int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
struct hlist_node *aq_node2;
struct aq_rx_filter *rule;
int count = 0;
cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
hlist_for_each_entry_safe(rule, aq_node2,
&rx_fltrs->filter_list, aq_node) {
if (unlikely(count == cmd->rule_cnt))
return -EMSGSIZE;
rule_locs[count++] = rule->aq_fsp.location;
}
cmd->rule_cnt = count;
return 0;
}
int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
struct hlist_node *aq_node2;
struct aq_rx_filter *rule;
int err = 0;
hlist_for_each_entry_safe(rule, aq_node2,
&rx_fltrs->filter_list, aq_node) {
err = aq_add_del_rule(aq_nic, rule, false);
if (err)
goto err_exit;
hlist_del(&rule->aq_node);
kfree(rule);
--rx_fltrs->active_filters;
}
err_exit:
return err;
}
int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
struct hlist_node *aq_node2;
struct aq_rx_filter *rule;
int err = 0;
hlist_for_each_entry_safe(rule, aq_node2,
&rx_fltrs->filter_list, aq_node) {
err = aq_add_del_rule(aq_nic, rule, true);
if (err)
goto err_exit;
}
err_exit:
return err;
}
int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
{
const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
return -EOPNOTSUPP;
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
hweight += hweight_long(aq_nic->active_vlans[i]);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
return err;
}
err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
);
if (err)
return err;
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
if (hweight < AQ_VLAN_MAX_FILTERS)
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
/* otherwise leave the filter in promiscuous mode */
}
return err;
}
int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
{
const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int err = 0;
memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
return -EOPNOTSUPP;
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
return err;
err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
);
return err;
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Copyright (C) 2014-2017 aQuantia Corporation. */
/* File aq_filters.h: RX filters related functions. */
#ifndef AQ_FILTERS_H
#define AQ_FILTERS_H
#include "aq_nic.h"
enum aq_rx_filter_type {
aq_rx_filter_ethertype,
aq_rx_filter_vlan,
aq_rx_filter_l3l4
};
struct aq_rx_filter {
struct hlist_node aq_node;
enum aq_rx_filter_type type;
struct ethtool_rx_flow_spec aq_fsp;
};
u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic);
struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic);
int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
u32 *rule_locs);
int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
#endif /* AQ_FILTERS_H */
......@@ -18,6 +18,17 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
#define AQ_RX_LAST_LOC_FETHERT 31U
#define AQ_RX_FIRST_LOC_FL3L4 32U
#define AQ_RX_LAST_LOC_FL3L4 39U
#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4
#define AQ_VLAN_MAX_FILTERS \
(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
......@@ -130,6 +141,7 @@ struct aq_hw_s {
struct aq_ring_s;
struct aq_ring_param_s;
struct sk_buff;
struct aq_rx_filter_l3l4;
struct aq_hw_ops {
......@@ -183,6 +195,23 @@ struct aq_hw_ops {
int (*hw_packet_filter_set)(struct aq_hw_s *self,
unsigned int packet_filter);
int (*hw_filter_l3l4_set)(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data);
int (*hw_filter_l3l4_clear)(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data);
int (*hw_filter_l2_set)(struct aq_hw_s *self,
struct aq_rx_filter_l2 *data);
int (*hw_filter_l2_clear)(struct aq_hw_s *self,
struct aq_rx_filter_l2 *data);
int (*hw_filter_vlan_set)(struct aq_hw_s *self,
struct aq_rx_filter_vlan *aq_vlans);
int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable);
int (*hw_multicast_list_set)(struct aq_hw_s *self,
u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
......
......@@ -13,6 +13,7 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
......@@ -49,6 +50,11 @@ static int aq_ndev_open(struct net_device *ndev)
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
err = aq_reapply_rxnfc_all_rules(aq_nic);
if (err < 0)
goto err_exit;
err = aq_nic_start(aq_nic);
if (err < 0)
goto err_exit;
......@@ -101,6 +107,21 @@ static int aq_ndev_set_features(struct net_device *ndev,
bool is_lro = false;
int err = 0;
if (!(features & NETIF_F_NTUPLE)) {
if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
err = aq_clear_rxnfc_all_rules(aq_nic);
if (unlikely(err))
goto err_exit;
}
}
if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
err = aq_filters_vlan_offload_off(aq_nic);
if (unlikely(err))
goto err_exit;
}
}
aq_cfg->features = features;
if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
......@@ -119,6 +140,7 @@ static int aq_ndev_set_features(struct net_device *ndev,
err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
aq_cfg);
err_exit:
return err;
}
......@@ -147,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
aq_nic_set_multicast_list(aq_nic, ndev);
}
static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
u16 vid)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
return -EOPNOTSUPP;
set_bit(vid, aq_nic->active_vlans);
return aq_filters_vlans_update(aq_nic);
}
static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
u16 vid)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
return -EOPNOTSUPP;
clear_bit(vid, aq_nic->active_vlans);
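/* aq_del_fvlan_by_vlan() removes a matching rxnfc rule if one exists;
 * on -ENOENT there was none, so rebuild the table from active_vlans.
 */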
if (aq_del_fvlan_by_vlan(aq_nic, vid) == -ENOENT)
return aq_filters_vlans_update(aq_nic);
return 0;
}
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
......@@ -154,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
......@@ -84,8 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
cfg->vlan_id = 0U;
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */
......
......@@ -35,7 +35,6 @@ struct aq_nic_cfg_s {
u32 mtu;
u32 flow_control;
u32 link_speed_msk;
u32 vlan_id;
u32 wol;
u16 is_mc_list_enabled;
u16 mc_list_count;
......@@ -61,6 +60,23 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
struct aq_hw_rx_fl2 {
struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
};
struct aq_hw_rx_fl3l4 {
u8 active_ipv4;
u8 active_ipv6:2;
u8 is_ipv6;
};
struct aq_hw_rx_fltrs_s {
struct hlist_head filter_list;
u16 active_filters;
struct aq_hw_rx_fl2 fl2;
struct aq_hw_rx_fl3l4 fl3l4;
};
struct aq_nic_s {
atomic_t flags;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
......@@ -81,10 +97,13 @@ struct aq_nic_s {
u32 count;
u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
/* Bitmask of currently assigned VLANs from Linux */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct pci_dev *pdev;
unsigned int msix_entry_mask;
u32 irqvecs;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
......
......@@ -19,6 +19,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
......@@ -309,6 +310,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
struct aq_nic_s *self = pci_get_drvdata(pdev);
if (self->ndev) {
aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
aq_nic_free_vectors(self);
......
......@@ -41,7 +41,9 @@
NETIF_F_RXHASH | \
NETIF_F_SG | \
NETIF_F_TSO | \
NETIF_F_LRO | \
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
......@@ -319,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Always accept untagged packets */
hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
hw_atl_rpf_vlan_untagged_act_set(self, 1U);
/* Rx Interrupts */
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
......@@ -945,6 +938,142 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data)
{
u8 location = data->location;
if (!data->is_ipv6) {
hw_atl_rpfl3l4_cmd_clear(self, location);
hw_atl_rpf_l4_spd_set(self, 0U, location);
hw_atl_rpf_l4_dpd_set(self, 0U, location);
hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
} else {
int i;
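/* An IPv6 rule spans four consecutive filter entries; clear each one */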
for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
hw_atl_rpfl3l4_cmd_clear(self, location + i);
hw_atl_rpf_l4_spd_set(self, 0U, location + i);
hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
}
hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data)
{
u8 location = data->location;
hw_atl_b0_hw_fl3l4_clear(self, data);
if (data->cmd) {
if (!data->is_ipv6) {
hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
location,
data->ip_dst[0]);
hw_atl_rpfl3l4_ipv4_src_addr_set(self,
location,
data->ip_src[0]);
} else {
hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
location,
data->ip_dst);
hw_atl_rpfl3l4_ipv6_src_addr_set(self,
location,
data->ip_src);
}
}
hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
hw_atl_rpf_l4_spd_set(self, data->p_src, location);
hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
struct aq_rx_filter_l2 *data)
{
hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
hw_atl_rpf_etht_user_priority_en_set(self,
!!data->user_priority_en,
data->location);
if (data->user_priority_en)
hw_atl_rpf_etht_user_priority_set(self,
data->user_priority,
data->location);
if (data->queue < 0) {
hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
} else {
hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
struct aq_rx_filter_l2 *data)
{
hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
hw_atl_rpf_etht_flr_set(self, 0U, data->location);
hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
return aq_hw_err_from_flags(self);
}
/**
 * @brief Set VLAN filter table
 * @details Configure the VLAN filter table to accept traffic (and assign the
 * queue) for particular vlan ids.
 * Note: use this function under vlan promiscuous mode so that traffic is not
 * lost while the table is rewritten.
 *
 * @param self AQ HW structure
 * @param aq_vlans VLAN filter configuration
 * @return 0 - OK, <0 - error
 */
static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
struct aq_rx_filter_vlan *aq_vlans)
{
int i;
for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
if (aq_vlans[i].enable) {
hw_atl_rpf_vlan_id_flr_set(self,
aq_vlans[i].vlan_id,
i);
hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
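/* a queue of 0xFF (AQ_RX_QUEUE_NOT_ASSIGNED) means no RX queue steering */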
if (aq_vlans[i].queue != 0xFF) {
hw_atl_rpf_vlan_rxq_flr_set(self,
aq_vlans[i].queue,
i);
hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
}
}
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
/* set promiscuous mode when disabling the vlan filter */
hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
return aq_hw_err_from_flags(self);
}
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
......@@ -969,6 +1098,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
.hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
.hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
.hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
.hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
.hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
......
......@@ -898,6 +898,24 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
vlan_id_flr);
}
void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
HW_ATL_RPF_VL_RXQ_EN_F_MSK,
HW_ATL_RPF_VL_RXQ_EN_F_SHIFT,
vlan_rxq_en);
}
void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
HW_ATL_RPF_VL_RXQ_F_MSK,
HW_ATL_RPF_VL_RXQ_F_SHIFT,
vlan_rxq);
}
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter)
{
......@@ -965,6 +983,20 @@ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
HW_ATL_RPF_L4_SPD_MSK,
HW_ATL_RPF_L4_SPD_SHIFT, val);
}
void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
HW_ATL_RPF_L4_DPD_MSK,
HW_ATL_RPF_L4_DPD_SHIFT, val);
}
/* RPO: rx packet offload */
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en)
......@@ -1476,3 +1508,80 @@ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
up_force_intr);
}
void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
{
aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U);
}
void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
{
aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U);
}
void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
{
aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U);
}
void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
{
int i;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_DSTA_ADR(location + i),
0U);
}
void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
{
int i;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_SRCA_ADR(location + i),
0U);
}
void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 ipv4_dest)
{
aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location),
ipv4_dest);
}
void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 ipv4_src)
{
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_SRCA_ADR(location),
ipv4_src);
}
void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
{
aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd);
}
void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_src)
{
int i;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_SRCA_ADR(location + i),
ipv6_src[i]);
}
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_dest)
{
int i;
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_DSTA_ADR(location + i),
ipv6_dest[i]);
}
......@@ -441,6 +441,14 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
u32 filter);
/* Set VLAN RX queue assignment enable */
void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
u32 filter);
/* Set VLAN RX queue */
void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
u32 filter);
/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter);
......@@ -475,6 +483,12 @@ void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
/* set L4 source port */
void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
/* set L4 destination port */
void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
/* rpo */
/* set ipv4 header checksum offload enable */
......@@ -704,4 +718,38 @@ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
/* clear ipv4 filter destination address */
void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
/* clear ipv4 filter source address */
void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
/* clear command for filter l3-l4 */
void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
/* clear ipv6 filter destination address */
void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
/* clear ipv6 filter source address */
void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
/* set ipv4 filter destination address */
void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 ipv4_dest);
/* set ipv4 filter source address */
void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 ipv4_src);
/* set command for filter l3-l4 */
void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
/* set ipv6 filter source address */
void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_src);
/* set ipv6 filter destination address */
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
u32 *ipv6_dest);
#endif /* HW_ATL_LLH_H */
......@@ -1092,24 +1092,43 @@
/* Default value of bitfield vl_id{F}[B:0] */
#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
/* RX vl_rxq_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_rxq_en{F}".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
* PORT="pif_rpf_vl_rxq_en_i"
*/
/* Register address for bitfield vl_rxq_en{F} */
#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_rxq_en{F} */
#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
/* Inverted bitmask for bitfield vl_rxq_en{F} */
#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
/* Lower bit position of bitfield vl_rxq_en{F} */
#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
/* Width of bitfield vl_rxq_en{F} */
#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
/* Default value of bitfield vl_rxq_en{F} */
#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
/* RX vl_rxq{F}[4:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
* PORT="pif_rpf_vl_rxq0_i[4:0]"
*/
/* Register address for bitfield vl_rxq{F}[4:0] */
#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_rxq{F}[4:0] */
#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
/* Lower bit position of bitfield vl_rxq{F}[4:0] */
#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
/* Width of bitfield vl_rxw{F}[4:0] */
#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
/* Default value of bitfield vl_rxq{F}[4:0] */
#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
......@@ -1263,6 +1282,44 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
/* RX l4_sp{D}[F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
* Parameter: srcport {D} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l4_sp0_i[15:0]"
*/
/* Register address for bitfield l4_sp{D}[F:0] */
#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4)
/* Bitmask for bitfield l4_sp{D}[F:0] */
#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu
/* Inverted bitmask for bitfield l4_sp{D}[F:0] */
#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u
/* Lower bit position of bitfield l4_sp{D}[F:0] */
#define HW_ATL_RPF_L4_SPD_SHIFT 0
/* Width of bitfield l4_sp{D}[F:0] */
#define HW_ATL_RPF_L4_SPD_WIDTH 16
/* Default value of bitfield l4_sp{D}[F:0] */
#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0
/* RX l4_dp{D}[F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l4_dp{D}[F:0]".
* Parameter: destport {D} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l4_dp0_i[15:0]"
*/
/* Register address for bitfield l4_dp{D}[F:0] */
#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4)
/* Bitmask for bitfield l4_dp{D}[F:0] */
#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu
/* Inverted bitmask for bitfield l4_dp{D}[F:0] */
#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u
/* Lower bit position of bitfield l4_dp{D}[F:0] */
#define HW_ATL_RPF_L4_DPD_SHIFT 0
/* Width of bitfield l4_dp{D}[F:0] */
#define HW_ATL_RPF_L4_DPD_WIDTH 16
/* Default value of bitfield l4_dp{D}[F:0] */
#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_rpo_ipv4_chk_en_i"
......@@ -2418,4 +2475,48 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
* Parameter: location {D} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l3_sa0_i[31:0]"
*/
/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
/* Bitmask for bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_MSKN 0x00000000u
/* Lower bit position of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_SHIFT 0
/* Width of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_WIDTH 32
/* Default value of bitfield l3_sa0[1F:0] */
#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
* Parameter: location {D} | stride size 0x4 | range [0, 7]
* PORT="pif_rpf_l3_da0_i[31:0]"
*/
/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053D0 + (location) * 0x4)
/* Bitmask for bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_MSKN 0x00000000u
/* Lower bit position of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_SHIFT 0
/* Width of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_WIDTH 32
/* Default value of bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
#endif /* HW_ATL_LLH_INTERNAL_H */
......@@ -240,6 +240,64 @@ struct __packed offload_info {
u8 buf[0];
};
enum hw_atl_rx_action_with_traffic {
HW_ATL_RX_DISCARD,
HW_ATL_RX_HOST,
};
struct aq_rx_filter_vlan {
u8 enable;
u8 location;
u16 vlan_id;
u8 queue;
};
struct aq_rx_filter_l2 {
s8 queue;
u8 location;
u8 user_priority_en;
u8 user_priority;
u16 ethertype;
};
struct aq_rx_filter_l3l4 {
u32 cmd;
u8 location;
u32 ip_dst[4];
u32 ip_src[4];
u16 p_dst;
u16 p_src;
u8 is_ipv6;
};
enum hw_atl_rx_protocol_value_l3l4 {
HW_ATL_RX_TCP,
HW_ATL_RX_UDP,
HW_ATL_RX_SCTP,
HW_ATL_RX_ICMP
};
enum hw_atl_rx_ctrl_registers_l3l4 {
HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22),
HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23),
HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24),
HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25),
HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26),
HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27),
HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28),
HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29),
HW_ATL_RX_ENABLE_L3_IPV6 = BIT(30),
HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31)
};
#define HW_ATL_RX_QUEUE_FL3L4_SHIFT 8U
#define HW_ATL_RX_ACTION_FL3F4_SHIFT 16U
#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U
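/* Map an ethtool rule location (32..39) onto a zero-based L3/L4
 * register index.
 */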
#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
((location) - AQ_RX_FIRST_LOC_FL3L4)
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
......