Commit fb9bb704 authored by David S. Miller

Merge branch 'bnxt_en-ntuple-fuilter-support'

Michael Chan says:

====================
bnxt_en: Add basic ntuple filter support

The current driver only supports ntuple filters added by aRFS.  This
patch series adds basic support for user-defined TCP/UDP ntuple filters
added using ethtool.  Many of the patches are refactoring patches that
make the existing code general enough to support both aRFS and
user-defined filters.  aRFS filters always carry the Toeplitz hash value
computed by the NIC.  A Toeplitz hash function is added in patch 5 so
that the same hash value can be computed for user-defined filters.  The
hash is used to index the ntuple filter hash table, so all filters must
be hashed identically, using the same function and key.

v2: Fix compile error in patch #4 when CONFIG_BNXT_SRIOV is disabled.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8a48a2dc 8d7ba028
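
The series is driven from user space through ethtool's ntuple interface.
A usage sketch (the device name, addresses, queue number, and rule ID
below are hypothetical; the series accepts only TCP/UDP rules whose
fields are either fully masked or left unspecified, and the driver
assigns the rule ID, so no 'loc' argument is given at insertion):

	# ethtool -K eth0 ntuple on
	# ethtool -N eth0 flow-type tcp4 dst-ip 192.0.2.1 dst-port 443 action 2
	# ethtool -n eth0
	# ethtool -N eth0 delete 8191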
@@ -4199,15 +4199,24 @@ static void bnxt_init_vnics(struct bnxt *bp)
 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

 		if (bp->vnic_info[i].rss_hash_key) {
-			if (i == 0)
+			if (!i) {
+				u8 *key = (void *)vnic->rss_hash_key;
+				int k;
+
+				bp->toeplitz_prefix = 0;
 				get_random_bytes(vnic->rss_hash_key,
 						 HW_HASH_KEY_SIZE);
-			else
+				for (k = 0; k < 8; k++) {
+					bp->toeplitz_prefix <<= 8;
+					bp->toeplitz_prefix |= key[k];
+				}
+			} else {
 				memcpy(vnic->rss_hash_key,
 				       bp->vnic_info[0].rss_hash_key,
 				       HW_HASH_KEY_SIZE);
+			}
 		}
 	}
 }

 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
@@ -4789,9 +4798,8 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
 	}
 }

-static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
 {
-#ifdef CONFIG_RFS_ACCEL
 	int i;

 	/* Under rtnl_lock and all our NAPIs have been disabled.  It's
@@ -4803,40 +4811,73 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
 		struct bnxt_ntuple_filter *fltr;

 		head = &bp->ntp_fltr_hash_tbl[i];
-		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
-			hlist_del(&fltr->hash);
+		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+			bnxt_del_l2_filter(bp, fltr->l2_fltr);
+			if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+				continue;
+			hlist_del(&fltr->base.hash);
+			clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+			bp->ntp_fltr_count--;
 			kfree(fltr);
 		}
 	}
-	if (irq_reinit) {
-		bitmap_free(bp->ntp_fltr_bmap);
-		bp->ntp_fltr_bmap = NULL;
-	}
+	if (!all)
+		return;
+
+	bitmap_free(bp->ntp_fltr_bmap);
+	bp->ntp_fltr_bmap = NULL;
 	bp->ntp_fltr_count = 0;
-#endif
 }

 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
 {
-#ifdef CONFIG_RFS_ACCEL
 	int i, rc = 0;

-	if (!(bp->flags & BNXT_FLAG_RFS))
+	if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
 		return 0;

 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

 	bp->ntp_fltr_count = 0;
-	bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
+	bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);

 	if (!bp->ntp_fltr_bmap)
 		rc = -ENOMEM;

 	return rc;
-#else
-	return 0;
-#endif
+}
+
+static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
+{
+	int i;
+
+	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+		struct hlist_node *tmp;
+		struct bnxt_l2_filter *fltr;
+
+		head = &bp->l2_fltr_hash_tbl[i];
+		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+			if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+				continue;
+			hlist_del(&fltr->base.hash);
+			if (fltr->base.flags) {
+				clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+				bp->ntp_fltr_count--;
+			}
+			kfree(fltr);
+		}
+	}
+}
+
+static void bnxt_init_l2_fltr_tbl(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]);
+	get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed));
 }
@@ -4846,7 +4887,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
 	bnxt_free_rx_rings(bp);
 	bnxt_free_cp_rings(bp);
 	bnxt_free_all_cp_arrays(bp);
-	bnxt_free_ntp_fltrs(bp, irq_re_init);
+	bnxt_free_ntp_fltrs(bp, false);
+	bnxt_free_l2_filters(bp, false);
 	if (irq_re_init) {
 		bnxt_free_ring_stats(bp);
 		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
@@ -5290,25 +5332,301 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
 	return hwrm_req_send_silent(bp, req);
 }

+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+	if (!atomic_dec_and_test(&fltr->refcnt))
+		return;
+	spin_lock_bh(&bp->ntp_fltr_lock);
+	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+		spin_unlock_bh(&bp->ntp_fltr_lock);
+		return;
+	}
+	hlist_del_rcu(&fltr->base.hash);
+	if (fltr->base.flags) {
+		clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+		bp->ntp_fltr_count--;
+	}
+	spin_unlock_bh(&bp->ntp_fltr_lock);
+	kfree_rcu(fltr, base.rcu);
+}
+
+static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp,
+						      struct bnxt_l2_key *key,
+						      u32 idx)
+{
+	struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx];
+	struct bnxt_l2_filter *fltr;
+
+	hlist_for_each_entry_rcu(fltr, head, base.hash) {
+		struct bnxt_l2_key *l2_key = &fltr->l2_key;
+
+		if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+		    l2_key->vlan == key->vlan)
+			return fltr;
+	}
+	return NULL;
+}
+
+static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp,
+						    struct bnxt_l2_key *key,
+						    u32 idx)
+{
+	struct bnxt_l2_filter *fltr = NULL;
+
+	rcu_read_lock();
+	fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+	if (fltr)
+		atomic_inc(&fltr->refcnt);
+	rcu_read_unlock();
+	return fltr;
+}
+
+#define BNXT_IPV4_4TUPLE(bp, fkeys)					\
+	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
+	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) ||	\
+	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
+	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4))
+
+#define BNXT_IPV6_4TUPLE(bp, fkeys)					\
+	(((fkeys)->basic.ip_proto == IPPROTO_TCP &&			\
+	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) ||	\
+	 ((fkeys)->basic.ip_proto == IPPROTO_UDP &&			\
+	  (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6))
+
+static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys)
+{
+	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+		if (BNXT_IPV4_4TUPLE(bp, fkeys))
+			return sizeof(fkeys->addrs.v4addrs) +
+			       sizeof(fkeys->ports);
+
+		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
+			return sizeof(fkeys->addrs.v4addrs);
+	}
+
+	if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+		if (BNXT_IPV6_4TUPLE(bp, fkeys))
+			return sizeof(fkeys->addrs.v6addrs) +
+			       sizeof(fkeys->ports);
+
+		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
+			return sizeof(fkeys->addrs.v6addrs);
+	}
+
+	return 0;
+}
+
+static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys,
+			 const unsigned char *key)
+{
+	u64 prefix = bp->toeplitz_prefix, hash = 0;
+	struct bnxt_ipv4_tuple tuple4;
+	struct bnxt_ipv6_tuple tuple6;
+	int i, j, len = 0;
+	u8 *four_tuple;
+
+	len = bnxt_get_rss_flow_tuple_len(bp, fkeys);
+	if (!len)
+		return 0;
+
+	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+		tuple4.v4addrs = fkeys->addrs.v4addrs;
+		tuple4.ports = fkeys->ports;
+		four_tuple = (unsigned char *)&tuple4;
+	} else {
+		tuple6.v6addrs = fkeys->addrs.v6addrs;
+		tuple6.ports = fkeys->ports;
+		four_tuple = (unsigned char *)&tuple6;
+	}
+
+	for (i = 0, j = 8; i < len; i++, j++) {
+		u8 byte = four_tuple[i];
+		int bit;
+
+		for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
+			if (byte & 0x80)
+				hash ^= prefix;
+		}
+		prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0;
+	}
+
+	/* The valid part of the hash is in the upper 32 bits. */
+	return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK;
+}
+
 #ifdef CONFIG_RFS_ACCEL
-static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+static struct bnxt_l2_filter *
+bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key)
+{
+	struct bnxt_l2_filter *fltr;
+	u32 idx;
+
+	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+	      BNXT_L2_FLTR_HASH_MASK;
+	fltr = bnxt_lookup_l2_filter(bp, key, idx);
+	return fltr;
+}
+#endif
+
+static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
+			       struct bnxt_l2_key *key, u32 idx)
+{
+	struct hlist_head *head;
+
+	ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+	fltr->l2_key.vlan = key->vlan;
+	fltr->base.type = BNXT_FLTR_TYPE_L2;
+	if (fltr->base.flags) {
+		int bit_id;
+
+		bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+						 BNXT_MAX_FLTR, 0);
+		if (bit_id < 0)
+			return -ENOMEM;
+		fltr->base.sw_id = (u16)bit_id;
+	}
+	head = &bp->l2_fltr_hash_tbl[idx];
+	hlist_add_head_rcu(&fltr->base.hash, head);
+	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+	atomic_set(&fltr->refcnt, 1);
+	return 0;
+}
+
+static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
+						   struct bnxt_l2_key *key,
+						   gfp_t gfp)
+{
+	struct bnxt_l2_filter *fltr;
+	u32 idx;
+	int rc;
+
+	idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+	      BNXT_L2_FLTR_HASH_MASK;
+	fltr = bnxt_lookup_l2_filter(bp, key, idx);
+	if (fltr)
+		return fltr;
+
+	fltr = kzalloc(sizeof(*fltr), gfp);
+	if (!fltr)
+		return ERR_PTR(-ENOMEM);
+	spin_lock_bh(&bp->ntp_fltr_lock);
+	rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+	spin_unlock_bh(&bp->ntp_fltr_lock);
+	if (rc) {
+		bnxt_del_l2_filter(bp, fltr);
+		fltr = ERR_PTR(rc);
+	}
+	return fltr;
+}
+
+static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
+{
+#ifdef CONFIG_BNXT_SRIOV
+	struct bnxt_vf_info *vf = &pf->vf[vf_idx];
+
+	return vf->fw_fid;
+#else
+	return INVALID_HW_RING_ID;
+#endif
+}
+
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+	struct hwrm_cfa_l2_filter_free_input *req;
+	u16 target_id = 0xffff;
+	int rc;
+
+	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		if (fltr->base.vf_idx >= pf->active_vfs)
+			return -EINVAL;
+
+		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+		if (target_id == INVALID_HW_RING_ID)
+			return -EINVAL;
+	}
+
+	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
+	if (rc)
+		return rc;
+
+	req->target_id = cpu_to_le16(target_id);
+	req->l2_filter_id = fltr->base.filter_id;
+	return hwrm_req_send(bp, req);
+}
+
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
+{
+	struct hwrm_cfa_l2_filter_alloc_output *resp;
+	struct hwrm_cfa_l2_filter_alloc_input *req;
+	u16 target_id = 0xffff;
+	int rc;
+
+	if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		if (fltr->base.vf_idx >= pf->active_vfs)
+			return -EINVAL;
+
+		target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx);
+	}
+	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
+	if (rc)
+		return rc;
+
+	req->target_id = cpu_to_le16(target_id);
+	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+		req->flags |=
+			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+	req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+	req->enables =
+		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+	ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+	eth_broadcast_addr(req->l2_addr_mask);
+
+	if (fltr->l2_key.vlan) {
+		req->enables |=
+			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+				CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+				CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+		req->num_vlans = 1;
+		req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+		req->l2_ivlan_mask = cpu_to_le16(0xfff);
+	}
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (!rc) {
+		fltr->base.filter_id = resp->l2_filter_id;
+		set_bit(BNXT_FLTR_VALID, &fltr->base.state);
+	}
+	hwrm_req_drop(bp, req);
+	return rc;
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 				     struct bnxt_ntuple_filter *fltr)
 {
 	struct hwrm_cfa_ntuple_filter_free_input *req;
 	int rc;

+	set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state);
 	rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
 	if (rc)
 		return rc;

-	req->ntuple_filter_id = fltr->filter_id;
+	req->ntuple_filter_id = fltr->base.filter_id;
 	return hwrm_req_send(bp, req);
 }

 #define BNXT_NTP_FLTR_FLAGS						\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |		\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |		\
-	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |		\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |		\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |		\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |		\
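
The bnxt_toeplitz() helper added in the hunk above keeps the RSS key in
a sliding 64-bit window: the first eight key bytes are preloaded (they
are the same bytes bnxt_init_vnics() caches in bp->toeplitz_prefix),
each input bit shifts the window left, a set input bit XORs the window
into a 64-bit accumulator, and the upper 32 bits of the accumulator are
the hash.  A standalone sketch of the same technique (illustrative
plain C, not part of the patch; the function name and standalone form
are this editor's):

	#include <stdint.h>
	#include <stddef.h>

	/* Toeplitz hash over "data", with "key" at least len + 8 bytes
	 * (the driver uses the 40-byte RSS hash key).
	 */
	static uint32_t toeplitz_hash(const uint8_t *key, size_t keylen,
				      const uint8_t *data, size_t len)
	{
		uint64_t prefix = 0, hash = 0;
		size_t i, j;

		for (i = 0; i < 8; i++)	/* preload first 64 key bits */
			prefix = (prefix << 8) | key[i];

		for (i = 0, j = 8; i < len; i++, j++) {
			uint8_t byte = data[i];
			int bit;

			for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) {
				if (byte & 0x80)	/* input bit set: XOR the key window */
					hash ^= prefix;
			}
			/* slide the next key byte into the vacated low bits */
			prefix |= (j < keylen) ? key[j] : 0;
		}
		return hash >> 32;	/* valid part is the upper 32 bits */
	}

The 64-bit register means no separate 32-bit key window has to be
tracked per bit position; user-defined filters hashed this way land in
the same hash buckets as aRFS filters hashed by the NIC.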
@@ -5324,12 +5642,21 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE

-static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+void bnxt_fill_ipv6_mask(__be32 mask[4])
+{
+	int i;
+
+	for (i = 0; i < 4; i++)
+		mask[i] = cpu_to_be32(~0);
+}
+
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 				      struct bnxt_ntuple_filter *fltr)
 {
 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
 	struct hwrm_cfa_ntuple_filter_alloc_input *req;
 	struct flow_keys *keys = &fltr->fkeys;
+	struct bnxt_l2_filter *l2_fltr;
 	struct bnxt_vnic_info *vnic;
 	u32 flags = 0;
 	int rc;
@@ -5338,122 +5665,111 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 	if (rc)
 		return rc;

-	req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
+	l2_fltr = fltr->l2_fltr;
+	req->l2_filter_id = l2_fltr->base.filter_id;
+
 	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
-		req->dst_id = cpu_to_le16(fltr->rxq);
+		req->dst_id = cpu_to_le16(fltr->base.rxq);
 	} else {
-		vnic = &bp->vnic_info[fltr->rxq + 1];
+		vnic = &bp->vnic_info[fltr->base.rxq + 1];
 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
 	}
 	req->flags = cpu_to_le32(flags);
 	req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

 	req->ethertype = htons(ETH_P_IP);
-	memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
 	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
 	req->ip_protocol = keys->basic.ip_proto;

 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
-		int i;
-
 		req->ethertype = htons(ETH_P_IPV6);
 		req->ip_addr_type =
 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
-		*(struct in6_addr *)&req->src_ipaddr[0] =
-			keys->addrs.v6addrs.src;
-		*(struct in6_addr *)&req->dst_ipaddr[0] =
-			keys->addrs.v6addrs.dst;
-		for (i = 0; i < 4; i++) {
-			req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
-			req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+			*(struct in6_addr *)&req->src_ipaddr[0] =
+				keys->addrs.v6addrs.src;
+			bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
+		}
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+			*(struct in6_addr *)&req->dst_ipaddr[0] =
+				keys->addrs.v6addrs.dst;
+			bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
 		}
 	} else {
-		req->src_ipaddr[0] = keys->addrs.v4addrs.src;
-		req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
-		req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
-		req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+			req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+			req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+		}
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+			req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+			req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+		}
 	}
 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
 		req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
 		req->tunnel_type =
 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
 	}

-	req->src_port = keys->ports.src;
-	req->src_port_mask = cpu_to_be16(0xffff);
-	req->dst_port = keys->ports.dst;
-	req->dst_port_mask = cpu_to_be16(0xffff);
+	if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+		req->src_port = keys->ports.src;
+		req->src_port_mask = cpu_to_be16(0xffff);
+	}
+	if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+		req->dst_port = keys->ports.dst;
+		req->dst_port_mask = cpu_to_be16(0xffff);
+	}

 	resp = hwrm_req_hold(bp, req);
 	rc = hwrm_req_send(bp, req);
 	if (!rc)
-		fltr->filter_id = resp->ntuple_filter_id;
+		fltr->base.filter_id = resp->ntuple_filter_id;
 	hwrm_req_drop(bp, req);
 	return rc;
 }
-#endif

 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
 				     const u8 *mac_addr)
 {
-	struct hwrm_cfa_l2_filter_alloc_output *resp;
-	struct hwrm_cfa_l2_filter_alloc_input *req;
+	struct bnxt_l2_filter *fltr;
+	struct bnxt_l2_key key;
 	int rc;

-	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
-	if (rc)
-		return rc;
+	ether_addr_copy(key.dst_mac_addr, mac_addr);
+	key.vlan = 0;
+	fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL);
+	if (IS_ERR(fltr))
+		return PTR_ERR(fltr);

-	req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
-	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
-		req->flags |=
-			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
-	req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
-	req->enables =
-		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
-			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
-			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
-	memcpy(req->l2_addr, mac_addr, ETH_ALEN);
-	req->l2_addr_mask[0] = 0xff;
-	req->l2_addr_mask[1] = 0xff;
-	req->l2_addr_mask[2] = 0xff;
-	req->l2_addr_mask[3] = 0xff;
-	req->l2_addr_mask[4] = 0xff;
-	req->l2_addr_mask[5] = 0xff;
-
-	resp = hwrm_req_hold(bp, req);
-	rc = hwrm_req_send(bp, req);
-	if (!rc)
-		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
-							resp->l2_filter_id;
-	hwrm_req_drop(bp, req);
+	fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id;
+	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+	if (rc)
+		bnxt_del_l2_filter(bp, fltr);
+	else
+		bp->vnic_info[vnic_id].l2_filters[idx] = fltr;
 	return rc;
 }

 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
 {
-	struct hwrm_cfa_l2_filter_free_input *req;
 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
-	int rc;
+	int rc = 0;

 	/* Any associated ntuple filters will also be cleared by firmware. */
-	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
-	if (rc)
-		return rc;
-	hwrm_req_hold(bp, req);
 	for (i = 0; i < num_of_vnics; i++) {
 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

 		for (j = 0; j < vnic->uc_filter_count; j++) {
-			req->l2_filter_id = vnic->fw_l2_filter_id[j];
+			struct bnxt_l2_filter *fltr = vnic->l2_filters[j];

-			rc = hwrm_req_send(bp, req);
+			bnxt_hwrm_l2_filter_free(bp, fltr);
+			bnxt_del_l2_filter(bp, fltr);
 		}
 		vnic->uc_filter_count = 0;
 	}
-	hwrm_req_drop(bp, req);
 	return rc;
 }
@@ -9370,7 +9686,6 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)

 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 {
-#ifdef CONFIG_RFS_ACCEL
 	int i, rc = 0;

 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
@@ -9399,9 +9714,6 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 			break;
 	}
 	return rc;
-#else
-	return 0;
-#endif
 }

 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
@@ -9729,7 +10041,6 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
 	return rc;
 }

-#ifdef CONFIG_RFS_ACCEL
 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
 {
 	return bp->hw_resc.max_rsscos_ctxs;
@@ -9739,7 +10050,6 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
 {
 	return bp->hw_resc.max_vnics;
 }
-#endif

 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
 {
@@ -11742,7 +12052,6 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 {
 	struct net_device *dev = bp->dev;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-	struct hwrm_cfa_l2_filter_free_input *req;
 	struct netdev_hw_addr *ha;
 	int i, off = 0, rc;
 	bool uc_update;
@@ -11754,16 +12063,12 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 	if (!uc_update)
 		goto skip_uc;

-	rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
-	if (rc)
-		return rc;
-	hwrm_req_hold(bp, req);
 	for (i = 1; i < vnic->uc_filter_count; i++) {
-		req->l2_filter_id = vnic->fw_l2_filter_id[i];
+		struct bnxt_l2_filter *fltr = vnic->l2_filters[i];

-		rc = hwrm_req_send(bp, req);
+		bnxt_hwrm_l2_filter_free(bp, fltr);
+		bnxt_del_l2_filter(bp, fltr);
 	}
-	hwrm_req_drop(bp, req);

 	vnic->uc_filter_count = 1;
@@ -11858,7 +12163,6 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
 /* If runtime conditions support RFS */
 static bool bnxt_rfs_capable(struct bnxt *bp)
 {
-#ifdef CONFIG_RFS_ACCEL
 	int vnics, max_vnics, max_rss_ctxs;

 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
@@ -11894,9 +12198,6 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
 		netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
 	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
 	return false;
-#else
-	return false;
-#endif
 }

 static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -13559,38 +13860,102 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	}
 }

-#ifdef CONFIG_RFS_ACCEL
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+			    const struct sk_buff *skb)
+{
+	struct bnxt_vnic_info *vnic;
+
+	if (skb)
+		return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+
+	vnic = &bp->vnic_info[0];
+	return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
+}
+
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+			   u32 idx)
+{
+	struct hlist_head *head;
+	int bit_id;
+
+	spin_lock_bh(&bp->ntp_fltr_lock);
+	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+	if (bit_id < 0) {
+		spin_unlock_bh(&bp->ntp_fltr_lock);
+		return -ENOMEM;
+	}
+
+	fltr->base.sw_id = (u16)bit_id;
+	fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
+	fltr->base.flags |= BNXT_ACT_RING_DST;
+	head = &bp->ntp_fltr_hash_tbl[idx];
+	hlist_add_head_rcu(&fltr->base.hash, head);
+	set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+	bp->ntp_fltr_count++;
+	spin_unlock_bh(&bp->ntp_fltr_lock);
+	return 0;
+}
+
 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
 			    struct bnxt_ntuple_filter *f2)
 {
 	struct flow_keys *keys1 = &f1->fkeys;
 	struct flow_keys *keys2 = &f2->fkeys;

+	if (f1->ntuple_flags != f2->ntuple_flags)
+		return false;
+
 	if (keys1->basic.n_proto != keys2->basic.n_proto ||
 	    keys1->basic.ip_proto != keys2->basic.ip_proto)
 		return false;

 	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
-		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
-		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
+		if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
+		     keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
+		    ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
+		     keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
 			return false;
 	} else {
-		if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
-			   sizeof(keys1->addrs.v6addrs.src)) ||
-		    memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
-			   sizeof(keys1->addrs.v6addrs.dst)))
+		if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
+		     memcmp(&keys1->addrs.v6addrs.src,
+			    &keys2->addrs.v6addrs.src,
+			    sizeof(keys1->addrs.v6addrs.src))) ||
+		    ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
+		     memcmp(&keys1->addrs.v6addrs.dst,
+			    &keys2->addrs.v6addrs.dst,
+			    sizeof(keys1->addrs.v6addrs.dst))))
 			return false;
 	}

-	if (keys1->ports.ports == keys2->ports.ports &&
-	    keys1->control.flags == keys2->control.flags &&
-	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
-	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
+	if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
+	     keys1->ports.src != keys2->ports.src) ||
+	    ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
+	     keys1->ports.dst != keys2->ports.dst))
+		return false;
+
+	if (keys1->control.flags == keys2->control.flags &&
+	    f1->l2_fltr == f2->l2_fltr)
 		return true;

 	return false;
 }

+struct bnxt_ntuple_filter *
+bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+				struct bnxt_ntuple_filter *fltr, u32 idx)
+{
+	struct bnxt_ntuple_filter *f;
+	struct hlist_head *head;
+
+	head = &bp->ntp_fltr_hash_tbl[idx];
+	hlist_for_each_entry_rcu(f, head, base.hash) {
+		if (bnxt_fltr_match(f, fltr))
+			return f;
+	}
+	return NULL;
+}
+
+#ifdef CONFIG_RFS_ACCEL
 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 			      u16 rxq_index, u32 flow_id)
 {
@@ -13598,29 +13963,31 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	struct bnxt_ntuple_filter *fltr, *new_fltr;
 	struct flow_keys *fkeys;
 	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
-	int rc = 0, idx, bit_id, l2_idx = 0;
-	struct hlist_head *head;
+	struct bnxt_l2_filter *l2_fltr;
+	int rc = 0, idx;
 	u32 flags;

-	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
-		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
-		int off = 0, j;
+	if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
+		l2_fltr = bp->vnic_info[0].l2_filters[0];
+		atomic_inc(&l2_fltr->refcnt);
+	} else {
+		struct bnxt_l2_key key;

-		netif_addr_lock_bh(dev);
-		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
-			if (ether_addr_equal(eth->h_dest,
-					     vnic->uc_list + off)) {
-				l2_idx = j + 1;
-				break;
-			}
-		}
-		netif_addr_unlock_bh(dev);
-		if (!l2_idx)
+		ether_addr_copy(key.dst_mac_addr, eth->h_dest);
+		key.vlan = 0;
+		l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key);
+		if (!l2_fltr)
 			return -EINVAL;
+		if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) {
+			bnxt_del_l2_filter(bp, l2_fltr);
+			return -EINVAL;
+		}
 	}
-
 	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
-	if (!new_fltr)
+	if (!new_fltr) {
+		bnxt_del_l2_filter(bp, l2_fltr);
 		return -ENOMEM;
+	}

 	fkeys = &new_fltr->fkeys;
 	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
@@ -13647,46 +14014,48 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		goto err_free;
 	}

-	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
-	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+	new_fltr->l2_fltr = l2_fltr;
+	new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;

-	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
-	head = &bp->ntp_fltr_hash_tbl[idx];
+	idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(fltr, head, hash) {
-		if (bnxt_fltr_match(fltr, new_fltr)) {
-			rc = fltr->sw_id;
-			rcu_read_unlock();
-			goto err_free;
-		}
+	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+	if (fltr) {
+		rcu_read_unlock();
+		rc = fltr->base.sw_id;
+		goto err_free;
 	}
 	rcu_read_unlock();

-	spin_lock_bh(&bp->ntp_fltr_lock);
-	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
-					 BNXT_NTP_FLTR_MAX_FLTR, 0);
-	if (bit_id < 0) {
-		spin_unlock_bh(&bp->ntp_fltr_lock);
-		rc = -ENOMEM;
-		goto err_free;
-	}
-
-	new_fltr->sw_id = (u16)bit_id;
 	new_fltr->flow_id = flow_id;
-	new_fltr->l2_fltr_idx = l2_idx;
-	new_fltr->rxq = rxq_index;
-	hlist_add_head_rcu(&new_fltr->hash, head);
-	bp->ntp_fltr_count++;
-	spin_unlock_bh(&bp->ntp_fltr_lock);
-
-	bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
-
-	return new_fltr->sw_id;
+	new_fltr->base.rxq = rxq_index;
+	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+	if (!rc) {
+		bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
+		return new_fltr->base.sw_id;
+	}

 err_free:
+	bnxt_del_l2_filter(bp, l2_fltr);
 	kfree(new_fltr);
 	return rc;
 }
+#endif
+
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
+{
+	spin_lock_bh(&bp->ntp_fltr_lock);
+	if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) {
+		spin_unlock_bh(&bp->ntp_fltr_lock);
+		return;
+	}
+	hlist_del_rcu(&fltr->base.hash);
+	bp->ntp_fltr_count--;
+	spin_unlock_bh(&bp->ntp_fltr_lock);
+	bnxt_del_l2_filter(bp, fltr->l2_fltr);
+	clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
+	kfree_rcu(fltr, base.rcu);
+}

 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
 {
@@ -13699,13 +14068,15 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
 		int rc;

 		head = &bp->ntp_fltr_hash_tbl[i];
-		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
 			bool del = false;

-			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
-				if (rps_may_expire_flow(bp->dev, fltr->rxq,
+			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
+				if (fltr->base.flags & BNXT_ACT_NO_AGING)
+					continue;
+				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
 							fltr->flow_id,
-							fltr->sw_id)) {
+							fltr->base.sw_id)) {
 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
 									 fltr);
 					del = true;
@@ -13716,32 +14087,17 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
 				if (rc)
 					del = true;
 				else
-					set_bit(BNXT_FLTR_VALID, &fltr->state);
+					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
 			}

-			if (del) {
-				spin_lock_bh(&bp->ntp_fltr_lock);
-				hlist_del_rcu(&fltr->hash);
-				bp->ntp_fltr_count--;
-				spin_unlock_bh(&bp->ntp_fltr_lock);
-				synchronize_rcu();
-				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
-				kfree(fltr);
-			}
+			if (del)
+				bnxt_del_ntp_filter(bp, fltr);
 		}
 	}
 	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
 		netdev_info(bp->dev, "Receive PF driver unload event!\n");
 }

-#else
-
-static void bnxt_cfg_ntp_filters(struct bnxt *bp)
-{
-}
-
-#endif /* CONFIG_RFS_ACCEL */
-
 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
 				    unsigned int entry, struct udp_tunnel_info *ti)
 {
@@ -13900,6 +14256,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bnxt_ptp_clear(bp);
 	unregister_netdev(dev);

+	bnxt_free_l2_filters(bp, true);
+	bnxt_free_ntp_fltrs(bp, true);
 	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
 	/* Flush any pending tasks */
 	cancel_work_sync(&bp->sp_task);
@@ -14449,6 +14807,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err_pci_clean;

+	bnxt_init_l2_fltr_tbl(bp);
 	bnxt_set_rx_skb_mode(bp, false);
 	bnxt_set_tpa_flags(bp);
 	bnxt_set_ring_params(bp);
...
@@ -1219,7 +1219,7 @@ struct bnxt_vnic_info {
 	u16		fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
 	u16		fw_l2_ctx_id;
 #define BNXT_MAX_UC_ADDRS	4
-	__le64		fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+	struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
 				/* index 0 always dev_addr */
 	u16		uc_filter_count;
 	u8		*uc_list;
@@ -1332,19 +1332,71 @@ struct bnxt_pf_info {
 	struct bnxt_vf_info	*vf;
 };

-struct bnxt_ntuple_filter {
+struct bnxt_filter_base {
 	struct hlist_node	hash;
-	u8			dst_mac_addr[ETH_ALEN];
-	u8			src_mac_addr[ETH_ALEN];
-	struct flow_keys	fkeys;
 	__le64			filter_id;
+	u8			type;
+#define BNXT_FLTR_TYPE_NTUPLE	1
+#define BNXT_FLTR_TYPE_L2	2
+	u8			flags;
+#define BNXT_ACT_DROP		1
+#define BNXT_ACT_RING_DST	2
+#define BNXT_ACT_FUNC_DST	4
+#define BNXT_ACT_NO_AGING	8
 	u16			sw_id;
-	u8			l2_fltr_idx;
 	u16			rxq;
-	u32			flow_id;
+	u16			fw_vnic_id;
+	u16			vf_idx;
 	unsigned long		state;
 #define BNXT_FLTR_VALID		0
-#define BNXT_FLTR_UPDATE	1
+#define BNXT_FLTR_INSERTED	1
+#define BNXT_FLTR_FW_DELETED	2
+	struct rcu_head		rcu;
+};
+
+struct bnxt_ntuple_filter {
+	struct bnxt_filter_base	base;
+	struct flow_keys	fkeys;
+	struct bnxt_l2_filter	*l2_fltr;
+	u32			ntuple_flags;
+#define BNXT_NTUPLE_MATCH_SRC_IP	1
+#define BNXT_NTUPLE_MATCH_DST_IP	2
+#define BNXT_NTUPLE_MATCH_SRC_PORT	4
+#define BNXT_NTUPLE_MATCH_DST_PORT	8
+#define BNXT_NTUPLE_MATCH_ALL		(BNXT_NTUPLE_MATCH_SRC_IP |	\
+					 BNXT_NTUPLE_MATCH_DST_IP |	\
+					 BNXT_NTUPLE_MATCH_SRC_PORT |	\
+					 BNXT_NTUPLE_MATCH_DST_PORT)
+	u32			flow_id;
+};
+
+struct bnxt_l2_key {
+	union {
+		struct {
+			u8	dst_mac_addr[ETH_ALEN];
+			u16	vlan;
+		};
+		u32		filter_key;
+	};
+};
+
+struct bnxt_ipv4_tuple {
+	struct flow_dissector_key_ipv4_addrs v4addrs;
+	struct flow_dissector_key_ports ports;
+};
+
+struct bnxt_ipv6_tuple {
+	struct flow_dissector_key_ipv6_addrs v6addrs;
+	struct flow_dissector_key_ports ports;
+};
+
+#define BNXT_L2_KEY_SIZE	(sizeof(struct bnxt_l2_key) / 4)
+
+struct bnxt_l2_filter {
+	struct bnxt_filter_base	base;
+	struct bnxt_l2_key	l2_key;
+	atomic_t		refcnt;
 };

 struct bnxt_link_info {
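
One detail worth calling out in the hunk above: struct bnxt_l2_key
overlays the 6-byte MAC plus 2-byte VLAN with a u32 filter_key, so
jhash2() can treat the key as BNXT_L2_KEY_SIZE (= 2) 32-bit words
starting at &key->filter_key.  A compile-time sketch of that layout
assumption (illustrative C11, not part of the patch):

	#include <stdint.h>
	#include <assert.h>

	struct l2_key {				/* mirrors struct bnxt_l2_key */
		union {
			struct {
				uint8_t  dst_mac_addr[6];
				uint16_t vlan;
			};
			uint32_t filter_key;	/* first of the two hashed words */
		};
	};

	/* 6 + 2 bytes pack into exactly two 32-bit words for jhash2() */
	static_assert(sizeof(struct l2_key) / 4 == 2, "key must be 2 u32 words");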
@@ -2367,6 +2419,7 @@ struct bnxt {
 	int			db_size;

 #define BNXT_NTP_FLTR_MAX_FLTR	4096
+#define BNXT_MAX_FLTR		(BNXT_NTP_FLTR_MAX_FLTR + BNXT_L2_FLTR_MAX_FLTR)
 #define BNXT_NTP_FLTR_HASH_SIZE	512
 #define BNXT_NTP_FLTR_HASH_MASK	(BNXT_NTP_FLTR_HASH_SIZE - 1)
 	struct hlist_head	ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
@@ -2375,6 +2428,14 @@ struct bnxt {
 	unsigned long		*ntp_fltr_bmap;
 	int			ntp_fltr_count;

+#define BNXT_L2_FLTR_MAX_FLTR	1024
+#define BNXT_L2_FLTR_HASH_SIZE	32
+#define BNXT_L2_FLTR_HASH_MASK	(BNXT_L2_FLTR_HASH_SIZE - 1)
+	struct hlist_head	l2_fltr_hash_tbl[BNXT_L2_FLTR_HASH_SIZE];
+	u32			hash_seed;
+	u64			toeplitz_prefix;
+
 	/* To protect link related settings during link changes and
 	 * ethtool settings changes.
 	 */
@@ -2582,6 +2643,14 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
			    int bmap_size, bool async_only);
 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
+void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+				     struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+				      struct bnxt_ntuple_filter *fltr);
+void bnxt_fill_ipv6_mask(__be32 mask[4]);
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
@@ -2625,6 +2694,13 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 int bnxt_fw_init_one(struct bnxt *bp);
 bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
+struct bnxt_ntuple_filter *bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
+				struct bnxt_ntuple_filter *fltr, u32 idx);
+u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
+			    const struct sk_buff *skb);
+int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
+			   u32 idx);
+void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr);
 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
 int bnxt_restore_pf_fw_resources(struct bnxt *bp);
 int bnxt_get_port_parent_id(struct net_device *dev,
...
@@ -1011,29 +1011,60 @@ static int bnxt_set_channels(struct net_device *dev,
 	return rc;
 }

-#ifdef CONFIG_RFS_ACCEL
-static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
-			    u32 *rule_locs)
+static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
+				     int tbl_size, u32 *ids, u32 start,
+				     u32 id_cnt)
 {
-	int i, j = 0;
+	int i, j = start;

-	cmd->data = bp->ntp_fltr_count;
-	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+	if (j >= id_cnt)
+		return j;
+	for (i = 0; i < tbl_size; i++) {
 		struct hlist_head *head;
-		struct bnxt_ntuple_filter *fltr;
+		struct bnxt_filter_base *fltr;

-		head = &bp->ntp_fltr_hash_tbl[i];
-		rcu_read_lock();
+		head = &tbl[i];
 		hlist_for_each_entry_rcu(fltr, head, hash) {
-			if (j == cmd->rule_cnt)
-				break;
-			rule_locs[j++] = fltr->sw_id;
+			if (!fltr->flags ||
+			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
+				continue;
+			ids[j++] = fltr->sw_id;
+			if (j == id_cnt)
+				return j;
 		}
-		rcu_read_unlock();
-		if (j == cmd->rule_cnt)
-			break;
 	}
-	cmd->rule_cnt = j;
+	return j;
+}
+
+static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
+						      struct hlist_head tbl[],
+						      int tbl_size, u32 id)
+{
+	int i;
+
+	for (i = 0; i < tbl_size; i++) {
+		struct hlist_head *head;
+		struct bnxt_filter_base *fltr;
+
+		head = &tbl[i];
+		hlist_for_each_entry_rcu(fltr, head, hash) {
+			if (fltr->flags && fltr->sw_id == id)
+				return fltr;
+		}
+	}
+	return NULL;
+}
+
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+			    u32 *rule_locs)
+{
+	cmd->data = bp->ntp_fltr_count;
+	rcu_read_lock();
+	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
+						  BNXT_NTP_FLTR_HASH_SIZE,
+						  rule_locs, 0, cmd->rule_cnt);
+	rcu_read_unlock();
+
 	return 0;
 }
@@ -1041,27 +1072,24 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
 {
 	struct ethtool_rx_flow_spec *fs =
 		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct bnxt_filter_base *fltr_base;
 	struct bnxt_ntuple_filter *fltr;
 	struct flow_keys *fkeys;
-	int i, rc = -EINVAL;
+	int rc = -EINVAL;

 	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
 		return rc;

-	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
-		struct hlist_head *head;
-
-		head = &bp->ntp_fltr_hash_tbl[i];
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(fltr, head, hash) {
-			if (fltr->sw_id == fs->location)
-				goto fltr_found;
-		}
-		rcu_read_unlock();
-	}
-	return rc;
+	rcu_read_lock();
+	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+					  BNXT_NTP_FLTR_HASH_SIZE,
+					  fs->location);
+	if (!fltr_base) {
+		rcu_read_unlock();
+		return rc;
+	}
+	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

-fltr_found:
 	fkeys = &fltr->fkeys;
 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
 		if (fkeys->basic.ip_proto == IPPROTO_TCP)
@@ -1071,20 +1099,23 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
 		else
 			goto fltr_err;

-		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
-		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
-		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
-		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
-		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
-		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
-		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
-		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+			fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+			fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+		}
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+			fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+			fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+		}
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+			fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+		}
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+			fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+		}
 	} else {
-		int i;
-
 		if (fkeys->basic.ip_proto == IPPROTO_TCP)
 			fs->flow_type = TCP_V6_FLOW;
 		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
@@ -1092,22 +1123,27 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
 		else
 			goto fltr_err;

-		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
-			fkeys->addrs.v6addrs.src;
-		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
-			fkeys->addrs.v6addrs.dst;
-		for (i = 0; i < 4; i++) {
-			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
-			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
+			*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+				fkeys->addrs.v6addrs.src;
+			bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
+		}
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
+			*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+				fkeys->addrs.v6addrs.dst;
+			bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
 		}
-		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
-		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
-		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
-		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+			fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
+		}
+		if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+			fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+		}
 	}

-	fs->ring_cookie = fltr->rxq;
+	fs->ring_cookie = fltr->base.rxq;
 	rc = 0;

 fltr_err:
@@ -1115,7 +1151,220 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)

 	return rc;
 }
-#endif
+
+#define IPV4_ALL_MASK		((__force __be32)~0)
+#define L4_PORT_ALL_MASK	((__force __be16)~0)
+
+static bool ipv6_mask_is_full(__be32 mask[4])
+{
+	return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+}
+
+static bool ipv6_mask_is_zero(__be32 mask[4])
+{
+	return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
+static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
+				    struct ethtool_rx_flow_spec *fs)
+{
+	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+	struct bnxt_ntuple_filter *new_fltr, *fltr;
+	struct bnxt_l2_filter *l2_fltr;
+	u32 flow_type = fs->flow_type;
+	struct flow_keys *fkeys;
+	u32 idx;
+	int rc;
+
+	if (!bp->vnic_info)
+		return -EAGAIN;
+
+	if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+		return -EOPNOTSUPP;
+
+	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
+	if (!new_fltr)
+		return -ENOMEM;
+
+	l2_fltr = bp->vnic_info[0].l2_filters[0];
+	atomic_inc(&l2_fltr->refcnt);
+	new_fltr->l2_fltr = l2_fltr;
+	fkeys = &new_fltr->fkeys;
+
+	rc = -EOPNOTSUPP;
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW: {
+		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
+		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;
+
+		fkeys->basic.ip_proto = IPPROTO_TCP;
+		if (flow_type == UDP_V4_FLOW)
+			fkeys->basic.ip_proto = IPPROTO_UDP;
+		fkeys->basic.n_proto = htons(ETH_P_IP);
+
+		if (ip_mask->ip4src == IPV4_ALL_MASK) {
+			fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+		} else if (ip_mask->ip4src) {
+			goto ntuple_err;
+		}
+		if (ip_mask->ip4dst == IPV4_ALL_MASK) {
+			fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+		} else if (ip_mask->ip4dst) {
+			goto ntuple_err;
+		}
+
+		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+			fkeys->ports.src = ip_spec->psrc;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+		} else if (ip_mask->psrc) {
+			goto ntuple_err;
+		}
+		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+			fkeys->ports.dst = ip_spec->pdst;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+		} else if (ip_mask->pdst) {
+			goto ntuple_err;
+		}
+		break;
+	}
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW: {
+		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
+		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;
+
+		fkeys->basic.ip_proto = IPPROTO_TCP;
+		if (flow_type == UDP_V6_FLOW)
+			fkeys->basic.ip_proto = IPPROTO_UDP;
+		fkeys->basic.n_proto = htons(ETH_P_IPV6);
+
+		if (ipv6_mask_is_full(ip_mask->ip6src)) {
+			fkeys->addrs.v6addrs.src =
+				*(struct in6_addr *)&ip_spec->ip6src;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
+		} else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
+			goto ntuple_err;
+		}
+		if (ipv6_mask_is_full(ip_mask->ip6dst)) {
+			fkeys->addrs.v6addrs.dst =
+				*(struct in6_addr *)&ip_spec->ip6dst;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
+		} else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
+			goto ntuple_err;
+		}
+
+		if (ip_mask->psrc == L4_PORT_ALL_MASK) {
+			fkeys->ports.src = ip_spec->psrc;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
+		} else if (ip_mask->psrc) {
+			goto ntuple_err;
+		}
+		if (ip_mask->pdst == L4_PORT_ALL_MASK) {
+			fkeys->ports.dst = ip_spec->pdst;
+			new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
+		} else if (ip_mask->pdst) {
+			goto ntuple_err;
+		}
+		break;
+	}
+	default:
+		rc = -EOPNOTSUPP;
+		goto ntuple_err;
+	}
+	if (!new_fltr->ntuple_flags)
+		goto ntuple_err;
+
+	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
+	rcu_read_lock();
+	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
+	if (fltr) {
+		rcu_read_unlock();
+		rc = -EEXIST;
+		goto ntuple_err;
+	}
+	rcu_read_unlock();
+
+	new_fltr->base.rxq = ring;
+	new_fltr->base.flags = BNXT_ACT_NO_AGING;
+	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
+	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
+	if (!rc) {
+		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
+		if (rc) {
+			bnxt_del_ntp_filter(bp, new_fltr);
+			return rc;
+		}
+		fs->location = new_fltr->base.sw_id;
+		return 0;
+	}
+
+ntuple_err:
+	atomic_dec(&l2_fltr->refcnt);
+	kfree(new_fltr);
+	return rc;
+}
+
+static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fs = &cmd->fs;
+	u32 ring, flow_type;
+	int rc;
+	u8 vf;
+
+	if (!netif_running(bp->dev))
+		return -EAGAIN;
+	if (!(bp->flags & BNXT_FLAG_RFS))
+		return -EPERM;
+	if (fs->location != RX_CLS_LOC_ANY)
+		return -EINVAL;
+
+	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+	if (BNXT_VF(bp) && vf)
+		return -EINVAL;
+	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
+		return -EINVAL;
+	if (!vf && ring >= bp->rx_nr_rings)
+		return -EINVAL;
+
+	flow_type = fs->flow_type;
+	if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+		return -EINVAL;
+	flow_type &= ~FLOW_EXT;
+	if (flow_type == ETHER_FLOW)
+		rc = -EOPNOTSUPP;
+	else
+		rc = bnxt_add_ntuple_cls_rule(bp, fs);
+	return rc;
+}
+
+static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fs = &cmd->fs;
+	struct bnxt_filter_base *fltr_base;
+
+	rcu_read_lock();
+	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
+					  BNXT_NTP_FLTR_HASH_SIZE,
+					  fs->location);
+	if (fltr_base) {
+		struct bnxt_ntuple_filter *fltr;
+
+		fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
+		rcu_read_unlock();
+		if (!(fltr->base.flags & BNXT_ACT_NO_AGING))
+			return -EINVAL;
+		bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
+		bnxt_del_ntp_filter(bp, fltr);
+		return 0;
+	}
+	rcu_read_unlock();
+	return -ENOENT;
+}

 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
 {
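
The two new set_rxnfc cases wired up further below
(ETHTOOL_SRXCLSRLINS/ETHTOOL_SRXCLSRLDEL) are what the ethtool CLI
drives.  A minimal user-space sketch of the insert path (hypothetical
device name and addresses; error handling trimmed; the structures are
the standard linux/ethtool.h UAPI):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <arpa/inet.h>

	int main(void)
	{
		struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
		struct ifreq ifr = {};
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		nfc.fs.flow_type = TCP_V4_FLOW;
		nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.1");
		nfc.fs.m_u.tcp_ip4_spec.ip4dst = htonl(0xffffffff); /* full mask only */
		nfc.fs.h_u.tcp_ip4_spec.pdst = htons(443);
		nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);
		nfc.fs.ring_cookie = 2;			/* steer to RX queue 2 */
		nfc.fs.location = RX_CLS_LOC_ANY;	/* driver assigns the ID */

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&nfc;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("rule id %u\n", nfc.fs.location);
		return 0;
	}

Because fs.location is RX_CLS_LOC_ANY, the driver picks a free ID via
bnxt_insert_ntp_filter() and the chosen fs.location is copied back to
user space.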
@@ -1265,14 +1514,13 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 	int rc = 0;

 	switch (cmd->cmd) {
-#ifdef CONFIG_RFS_ACCEL
 	case ETHTOOL_GRXRINGS:
 		cmd->data = bp->rx_nr_rings;
 		break;

 	case ETHTOOL_GRXCLSRLCNT:
 		cmd->rule_cnt = bp->ntp_fltr_count;
-		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+		cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
 		break;

 	case ETHTOOL_GRXCLSRLALL:
@@ -1282,7 +1530,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 	case ETHTOOL_GRXCLSRULE:
 		rc = bnxt_grxclsrule(bp, cmd);
 		break;
-#endif

 	case ETHTOOL_GRXFH:
 		rc = bnxt_grxfh(bp, cmd);
@@ -1306,6 +1553,14 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 		rc = bnxt_srxfh(bp, cmd);
 		break;

+	case ETHTOOL_SRXCLSRLINS:
+		rc = bnxt_srxclsrlins(bp, cmd);
+		break;
+
+	case ETHTOOL_SRXCLSRLDEL:
+		rc = bnxt_srxclsrldel(bp, cmd);
+		break;
+
 	default:
 		rc = -EOPNOTSUPP;
 		break;