Commit 992d38d2 authored by Michael Chan, committed by David S. Miller

bnxt_en: Refactor bnxt_ntuple_filter structure.

This is in preparation to support user defined L2 (ether) filters,
which will have many similarities with ntuple filters.  Refactor
bnxt_ntuple_filter structure to have a bnxt_filter_base structure
that can be re-used by the L2 filters.
Reviewed-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8a48a2dc
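
The refactor follows a common kernel pattern: each filter type embeds a shared struct bnxt_filter_base, generic code (hash linkage, ethtool rule walks) touches only the base fields, and type-specific code recovers the containing filter with container_of(). Below is a minimal, self-contained userspace sketch of that pattern, not driver code; the simplified names and fields (filter_base, ntuple_filter, flow_id) are illustrative stand-ins for the real bnxt definitions in the diff that follows.

/*
 * Minimal model of the embedded-base pattern: generic code handles
 * struct filter_base pointers and per-type code uses container_of()
 * to get back to the enclosing filter.  Simplified stand-ins only.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct filter_base {
	unsigned char	type;		/* models base.type / BNXT_FLTR_TYPE_* */
#define FLTR_TYPE_NTUPLE	1
#define FLTR_TYPE_L2		2
	unsigned short	sw_id;		/* models base.sw_id */
	unsigned short	rxq;		/* models base.rxq */
};

struct ntuple_filter {			/* models bnxt_ntuple_filter */
	struct filter_base	base;	/* shared fields live in the base */
	unsigned int		flow_id;	/* type-specific field */
};

/* Generic code: sees only the base and dispatches on base->type. */
static void show_filter(struct filter_base *fb)
{
	if (fb->type == FLTR_TYPE_NTUPLE) {
		struct ntuple_filter *nf =
			container_of(fb, struct ntuple_filter, base);

		printf("ntuple sw_id=%u rxq=%u flow_id=%#x\n",
		       fb->sw_id, fb->rxq, nf->flow_id);
	}
}

int main(void)
{
	struct ntuple_filter nf = {
		.base = { .type = FLTR_TYPE_NTUPLE, .sw_id = 3, .rxq = 1 },
		.flow_id = 0x1234,
	};

	show_filter(&nf.base);	/* generic code only needs &nf.base */
	return 0;
}
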
@@ -4803,8 +4803,8 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
 		struct bnxt_ntuple_filter *fltr;

 		head = &bp->ntp_fltr_hash_tbl[i];
-		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
-			hlist_del(&fltr->hash);
+		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
+			hlist_del(&fltr->base.hash);
 			kfree(fltr);
 		}
 	}
@@ -5301,7 +5301,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
 	if (rc)
 		return rc;

-	req->ntuple_filter_id = fltr->filter_id;
+	req->ntuple_filter_id = fltr->base.filter_id;
 	return hwrm_req_send(bp, req);
 }
@@ -5342,9 +5342,9 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,

 	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
-		req->dst_id = cpu_to_le16(fltr->rxq);
+		req->dst_id = cpu_to_le16(fltr->base.rxq);
 	} else {
-		vnic = &bp->vnic_info[fltr->rxq + 1];
+		vnic = &bp->vnic_info[fltr->base.rxq + 1];
 		req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
 	}
 	req->flags = cpu_to_le32(flags);
@@ -5389,7 +5389,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
 	resp = hwrm_req_hold(bp, req);
 	rc = hwrm_req_send(bp, req);
 	if (!rc)
-		fltr->filter_id = resp->ntuple_filter_id;
+		fltr->base.filter_id = resp->ntuple_filter_id;
 	hwrm_req_drop(bp, req);
 	return rc;
 }
@@ -13653,9 +13653,9 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
 	head = &bp->ntp_fltr_hash_tbl[idx];
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(fltr, head, hash) {
+	hlist_for_each_entry_rcu(fltr, head, base.hash) {
 		if (bnxt_fltr_match(fltr, new_fltr)) {
-			rc = fltr->sw_id;
+			rc = fltr->base.sw_id;
 			rcu_read_unlock();
 			goto err_free;
 		}
@@ -13671,17 +13671,18 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 		goto err_free;
 	}

-	new_fltr->sw_id = (u16)bit_id;
+	new_fltr->base.sw_id = (u16)bit_id;
 	new_fltr->flow_id = flow_id;
 	new_fltr->l2_fltr_idx = l2_idx;
-	new_fltr->rxq = rxq_index;
-	hlist_add_head_rcu(&new_fltr->hash, head);
+	new_fltr->base.rxq = rxq_index;
+	new_fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
+	hlist_add_head_rcu(&new_fltr->base.hash, head);
 	bp->ntp_fltr_count++;
 	spin_unlock_bh(&bp->ntp_fltr_lock);

 	bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);

-	return new_fltr->sw_id;
+	return new_fltr->base.sw_id;

 err_free:
 	kfree(new_fltr);
@@ -13699,13 +13700,13 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
 		int rc;

 		head = &bp->ntp_fltr_hash_tbl[i];
-		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+		hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
 			bool del = false;

-			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
-				if (rps_may_expire_flow(bp->dev, fltr->rxq,
+			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
+				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
							fltr->flow_id,
-							fltr->sw_id)) {
+							fltr->base.sw_id)) {
 					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
 					del = true;
@@ -13716,16 +13717,16 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
 				if (rc)
 					del = true;
 				else
-					set_bit(BNXT_FLTR_VALID, &fltr->state);
+					set_bit(BNXT_FLTR_VALID, &fltr->base.state);
 			}

 			if (del) {
 				spin_lock_bh(&bp->ntp_fltr_lock);
-				hlist_del_rcu(&fltr->hash);
+				hlist_del_rcu(&fltr->base.hash);
 				bp->ntp_fltr_count--;
 				spin_unlock_bh(&bp->ntp_fltr_lock);
 				synchronize_rcu();
-				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+				clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
 				kfree(fltr);
 			}
 		}
@@ -1332,21 +1332,34 @@ struct bnxt_pf_info {
 	struct bnxt_vf_info	*vf;
 };

-struct bnxt_ntuple_filter {
+struct bnxt_filter_base {
 	struct hlist_node	hash;
-	u8			dst_mac_addr[ETH_ALEN];
-	u8			src_mac_addr[ETH_ALEN];
-	struct flow_keys	fkeys;
 	__le64			filter_id;
+	u8			type;
+#define BNXT_FLTR_TYPE_NTUPLE	1
+#define BNXT_FLTR_TYPE_L2	2
+	u8			flags;
+#define BNXT_ACT_DROP		1
+#define BNXT_ACT_RING_DST	2
+#define BNXT_ACT_FUNC_DST	4
 	u16			sw_id;
-	u8			l2_fltr_idx;
 	u16			rxq;
-	u32			flow_id;
+	u16			fw_vnic_id;
+	u16			vf_idx;
 	unsigned long		state;
 #define BNXT_FLTR_VALID		0
 #define BNXT_FLTR_UPDATE	1
 };

+struct bnxt_ntuple_filter {
+	struct bnxt_filter_base	base;
+	u8			dst_mac_addr[ETH_ALEN];
+	u8			src_mac_addr[ETH_ALEN];
+	struct flow_keys	fkeys;
+	u8			l2_fltr_idx;
+	u32			flow_id;
+};
+
 struct bnxt_link_info {
 	u8			phy_type;
 	u8			media_type;
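
The commit message says these base fields are meant to be shared with upcoming user-defined L2 (ether) filters. Purely as an illustration of that direction (hypothetical, not part of this commit, and the eventual L2 filter may well be defined differently), a second filter type could reuse the base like this:

/* Hypothetical sketch only: a second filter type embedding the same
 * struct bnxt_filter_base defined in the hunk above.  The struct and
 * field names (l2_addr, l2_ovlan) are illustrative, not from the
 * driver. */
struct bnxt_l2_filter_example {
	struct bnxt_filter_base	base;		/* hash, filter_id, type, sw_id, ... */
	u8			l2_addr[ETH_ALEN];	/* destination MAC to match */
	u16			l2_ovlan;		/* optional outer VLAN ID */
};

With base.type set to BNXT_FLTR_TYPE_L2, code that only dereferences base fields (hash linkage, sw_id, filter_id, state) can handle either filter kind and use base.type to tell them apart.
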
@@ -1024,10 +1024,10 @@ static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,

 		head = &bp->ntp_fltr_hash_tbl[i];
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(fltr, head, hash) {
+		hlist_for_each_entry_rcu(fltr, head, base.hash) {
 			if (j == cmd->rule_cnt)
 				break;
-			rule_locs[j++] = fltr->sw_id;
+			rule_locs[j++] = fltr->base.sw_id;
 		}
 		rcu_read_unlock();
 		if (j == cmd->rule_cnt)
@@ -1053,8 +1053,8 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)

 		head = &bp->ntp_fltr_hash_tbl[i];
 		rcu_read_lock();
-		hlist_for_each_entry_rcu(fltr, head, hash) {
-			if (fltr->sw_id == fs->location)
+		hlist_for_each_entry_rcu(fltr, head, base.hash) {
+			if (fltr->base.sw_id == fs->location)
 				goto fltr_found;
 		}
 		rcu_read_unlock();
@@ -1107,7 +1107,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
 		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
 	}

-	fs->ring_cookie = fltr->rxq;
+	fs->ring_cookie = fltr->base.rxq;
 	rc = 0;

 fltr_err: