Commit 59cde76f authored by Michael Chan, committed by David S. Miller

bnxt_en: Refactor filter insertion logic in bnxt_rx_flow_steer().

Add a new function bnxt_insert_ntp_filter() to insert the ntuple filter
into the hash table and perform other basic setup.  We'll use this function
to insert a user-defined filter from ethtool.

Also, export bnxt_lookup_ntp_filter_from_idx() and bnxt_get_ntp_filter_idx()
for similar purposes.  All ntuple related functions are no longer
compiled only for CONFIG_RFS_ACCEL.
Reviewed-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ee908d05
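
For context, below is a minimal sketch of how a later ethtool ntuple path might use the helpers exported by this patch. Only bnxt_get_ntp_filter_idx(), bnxt_lookup_ntp_filter_from_idx() and bnxt_insert_ntp_filter() come from the commit itself; the caller bnxt_add_user_ntuple_filter(), the NULL-skb hashing assumption and the -EEXIST policy are illustrative assumptions, not part of this change.

/* Hypothetical caller, for illustration only -- not part of this commit.
 * Assumes it lives inside the bnxt driver and can use its private header.
 */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include "bnxt.h"

static int bnxt_add_user_ntuple_filter(struct bnxt *bp,
				       struct bnxt_ntuple_filter *new_fltr)
{
	struct bnxt_ntuple_filter *fltr;
	u32 idx;
	int rc;

	/* Hash the software flow keys to pick a hash-table bucket.
	 * Assumption: a user-defined rule has no skb to pass here,
	 * unlike the RFS path which passes the received skb.
	 */
	idx = bnxt_get_ntp_filter_idx(bp, &new_fltr->fkeys, NULL);

	/* Reject duplicates the same way bnxt_rx_flow_steer() does. */
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		return -EEXIST;
	}
	rcu_read_unlock();

	/* Assign a sw_id from ntp_fltr_bmap, link the filter into
	 * ntp_fltr_hash_tbl[idx] and mark it BNXT_FLTR_INSERTED,
	 * all under ntp_fltr_lock.
	 */
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (rc)
		return rc;

	/* A real ethtool path would now program the filter into the
	 * NIC, e.g. via bnxt_hwrm_cfa_ntuple_filter_alloc().
	 */
	return 0;
}
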
@@ -4800,7 +4800,6 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
#ifdef CONFIG_RFS_ACCEL
int i;
/* Under rtnl_lock and all our NAPIs have been disabled. It's
@@ -4828,12 +4827,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
bitmap_free(bp->ntp_fltr_bmap);
bp->ntp_fltr_bmap = NULL;
bp->ntp_fltr_count = 0;
#endif
}
static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap)
@@ -4849,9 +4846,6 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
rc = -ENOMEM;
return rc;
#else
return 0;
#endif
}
static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
@@ -5615,7 +5609,6 @@ int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr)
return rc;
}
#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
@@ -5719,7 +5712,6 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
hwrm_req_drop(bp, req);
return rc;
}
#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
const u8 *mac_addr)
@@ -9677,7 +9669,6 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
@@ -9706,9 +9697,6 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
break;
}
return rc;
#else
return 0;
#endif
}
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
@@ -10036,7 +10024,6 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc;
}
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
return bp->hw_resc.max_rsscos_ctxs;
@@ -10046,7 +10033,6 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
return bp->hw_resc.max_vnics;
}
#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
@@ -12160,7 +12146,6 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
int vnics, max_vnics, max_rss_ctxs;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
@@ -12196,9 +12181,6 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
return false;
#else
return false;
#endif
}
static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -13861,8 +13843,8 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
static u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
const struct sk_buff *skb)
u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
const struct sk_buff *skb)
{
struct bnxt_vnic_info *vnic;
@@ -13873,7 +13855,30 @@ static u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
}
#ifdef CONFIG_RFS_ACCEL
int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
u32 idx)
{
struct hlist_head *head;
int bit_id;
spin_lock_bh(&bp->ntp_fltr_lock);
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
if (bit_id < 0) {
spin_unlock_bh(&bp->ntp_fltr_lock);
return -ENOMEM;
}
fltr->base.sw_id = (u16)bit_id;
fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
fltr->base.flags |= BNXT_ACT_RING_DST;
head = &bp->ntp_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
return 0;
}
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
@@ -13904,7 +13909,7 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
return false;
}
static struct bnxt_ntuple_filter *
struct bnxt_ntuple_filter *
bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr, u32 idx)
{
@@ -13919,6 +13924,7 @@ bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
return NULL;
}
#ifdef CONFIG_RFS_ACCEL
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
@@ -13927,8 +13933,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct flow_keys *fkeys;
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
struct bnxt_l2_filter *l2_fltr;
int rc = 0, idx, bit_id;
struct hlist_head *head;
int rc = 0, idx;
u32 flags;
if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
@@ -13981,7 +13986,6 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
new_fltr->l2_fltr = l2_fltr;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
head = &bp->ntp_fltr_hash_tbl[idx];
rcu_read_lock();
fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
if (fltr) {
@@ -13991,33 +13995,20 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
}
rcu_read_unlock();
spin_lock_bh(&bp->ntp_fltr_lock);
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
if (bit_id < 0) {
spin_unlock_bh(&bp->ntp_fltr_lock);
rc = -ENOMEM;
goto err_free;
}
new_fltr->base.sw_id = (u16)bit_id;
new_fltr->flow_id = flow_id;
new_fltr->base.rxq = rxq_index;
new_fltr->base.type = BNXT_FLTR_TYPE_NTUPLE;
new_fltr->base.flags = BNXT_ACT_RING_DST;
hlist_add_head_rcu(&new_fltr->base.hash, head);
set_bit(BNXT_FLTR_INSERTED, &new_fltr->base.state);
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
return new_fltr->base.sw_id;
rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
if (!rc) {
bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
return new_fltr->base.sw_id;
}
err_free:
bnxt_del_l2_filter(bp, l2_fltr);
kfree(new_fltr);
return rc;
}
#endif
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
@@ -14070,14 +14061,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
netdev_info(bp->dev, "Receive PF driver unload event!\n");
}
#else
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}
#endif /* CONFIG_RFS_ACCEL */
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
......
@@ -2678,6 +2678,12 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int bnxt_fw_init_one(struct bnxt *bp);
bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
struct bnxt_ntuple_filter *bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr, u32 idx);
u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
const struct sk_buff *skb);
int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
u32 idx);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_get_port_parent_id(struct net_device *dev,
......