Commit 05cc5a39 authored by Yuval Mintz, committed by David S. Miller

bnx2x: add vlan filtering offload

Currently the driver always runs in vlan-promisc mode, i.e., it receives
both tagged and untagged traffic and lets the network stack drop packets
tagged with unrequested vlan tags.

This patch implements vlan-filtering offload in the driver -
unless explicitly configured to promiscuous mode, only untagged packets
or packets tagged with requested vlans will reach the Rx flow.
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e82a08b0
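
In short, the flow added by the patch below is: the netdev gains NETIF_F_HW_VLAN_CTAG_FILTER, .ndo_vlan_rx_add_vid tracks every requested vid in bp->vlan_reg and programs it into the chip while HW filter credits last, and once the credits run out the driver raises bp->accept_any_vlan and falls back to the old vlan-promisc behaviour (returning to HW filtering when enough vids are removed again). The self-contained user-space sketch here models only that bookkeeping; VLAN_CREDIT, add_vid() and kill_vid() are illustrative names, and the real driver programs the chip through bnx2x_set_vlan_one()/bnx2x_vfpf_update_vlan() instead of printing.

    #include <stdbool.h>
    #include <stdio.h>

    #define VLAN_CREDIT 2   /* pretend the device exposes only 2 HW filter slots */
    #define MAX_VIDS    8

    struct vlan_entry {
            unsigned short vid;
            bool hw;        /* currently programmed into a HW filter? */
            bool used;
    };

    static struct vlan_entry vlan_reg[MAX_VIDS];    /* models bp->vlan_reg */
    static unsigned int vlan_cnt;                   /* models bp->vlan_cnt */
    static bool accept_any_vlan;                    /* models bp->accept_any_vlan */

    static void add_vid(unsigned short vid)         /* ~ bnx2x_vlan_rx_add_vid() */
    {
            unsigned int i;

            for (i = 0; i < MAX_VIDS && vlan_reg[i].used; i++)
                    ;
            if (i == MAX_VIDS)
                    return;
            vlan_reg[i] = (struct vlan_entry){ .vid = vid, .used = true };
            vlan_cnt++;

            if (vlan_cnt > VLAN_CREDIT && !accept_any_vlan) {
                    accept_any_vlan = true;         /* out of credits: vlan-promisc */
                    printf("accept-any-vlan raised\n");
            } else if (vlan_cnt <= VLAN_CREDIT) {
                    vlan_reg[i].hw = true;          /* program a HW filter */
                    printf("vid %u programmed in HW\n", vid);
            }
    }

    static void kill_vid(unsigned short vid)        /* ~ bnx2x_vlan_rx_kill_vid() */
    {
            unsigned int i;

            for (i = 0; i < MAX_VIDS; i++)
                    if (vlan_reg[i].used && vlan_reg[i].vid == vid)
                            break;
            if (i == MAX_VIDS)
                    return;
            if (vlan_reg[i].hw)
                    printf("vid %u removed from HW\n", vid);
            vlan_reg[i].used = false;
            vlan_cnt--;

            if (vlan_cnt <= VLAN_CREDIT && accept_any_vlan) {
                    /* enough room again: program the entries that never made it */
                    for (i = 0; i < MAX_VIDS; i++)
                            if (vlan_reg[i].used && !vlan_reg[i].hw) {
                                    vlan_reg[i].hw = true;
                                    printf("vid %u programmed in HW\n",
                                           vlan_reg[i].vid);
                            }
                    accept_any_vlan = false;
                    printf("accept-any-vlan removed\n");
            }
    }

    int main(void)
    {
            add_vid(100);
            add_vid(200);
            add_vid(300);   /* third vid exceeds the credit -> accept-any-vlan */
            kill_vid(100);  /* back under the credit -> vid 300 gets a HW slot */
            return 0;
    }
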
...@@ -1228,6 +1228,10 @@ struct bnx2x_slowpath { ...@@ -1228,6 +1228,10 @@ struct bnx2x_slowpath {
struct eth_classify_rules_ramrod_data e2; struct eth_classify_rules_ramrod_data e2;
} mac_rdata; } mac_rdata;
union {
struct eth_classify_rules_ramrod_data e2;
} vlan_rdata;
union { union {
struct tstorm_eth_mac_filter_config e1x; struct tstorm_eth_mac_filter_config e1x;
struct eth_filter_rules_ramrod_data e2; struct eth_filter_rules_ramrod_data e2;
...@@ -1410,6 +1414,9 @@ struct bnx2x_sp_objs { ...@@ -1410,6 +1414,9 @@ struct bnx2x_sp_objs {
/* Queue State object */ /* Queue State object */
struct bnx2x_queue_sp_obj q_obj; struct bnx2x_queue_sp_obj q_obj;
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
}; };
struct bnx2x_fp_stats { struct bnx2x_fp_stats {
...@@ -1427,6 +1434,12 @@ enum { ...@@ -1427,6 +1434,12 @@ enum {
SUB_MF_MODE_BD, SUB_MF_MODE_BD,
}; };
struct bnx2x_vlan_entry {
struct list_head link;
u16 vid;
bool hw;
};
struct bnx2x { struct bnx2x {
/* Fields used in the tx and intr/napi performance paths /* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure * are grouped together in the beginning of the structure
...@@ -1865,8 +1878,6 @@ struct bnx2x { ...@@ -1865,8 +1878,6 @@ struct bnx2x {
int dcb_version; int dcb_version;
/* CAM credit pools */ /* CAM credit pools */
/* used only in sriov */
struct bnx2x_credit_pool_obj vlans_pool; struct bnx2x_credit_pool_obj vlans_pool;
struct bnx2x_credit_pool_obj macs_pool; struct bnx2x_credit_pool_obj macs_pool;
...@@ -1929,6 +1940,11 @@ struct bnx2x { ...@@ -1929,6 +1940,11 @@ struct bnx2x {
u16 rx_filter; u16 rx_filter;
struct bnx2x_link_report_data vf_link_vars; struct bnx2x_link_report_data vf_link_vars;
struct list_head vlan_reg;
u16 vlan_cnt;
u16 vlan_credit;
u16 vxlan_dst_port;
bool accept_any_vlan;
}; };
/* Tx queues may be less or equal to Rx queues */ /* Tx queues may be less or equal to Rx queues */
...@@ -1956,23 +1972,14 @@ extern int num_queues; ...@@ -1956,23 +1972,14 @@ extern int num_queues;
#define RSS_IPV6_TCP_CAP_MASK \ #define RSS_IPV6_TCP_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
/* func init flags */
#define FUNC_FLG_RSS 0x0001
#define FUNC_FLG_STATS 0x0002
/* removed FUNC_FLG_UNMATCHED 0x0004 */
#define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */
#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params { struct bnx2x_func_init_params {
/* dma */ /* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ bool spq_active;
dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */ dma_addr_t spq_map;
u16 spq_prod;
u16 func_flgs;
u16 func_id; /* abs fid */ u16 func_id; /* abs fid */
u16 pf_id; u16 pf_id;
u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
}; };
#define for_each_cnic_queue(bp, var) \ #define for_each_cnic_queue(bp, var) \
...@@ -2082,6 +2089,11 @@ struct bnx2x_func_init_params { ...@@ -2082,6 +2089,11 @@ struct bnx2x_func_init_params {
int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set, struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags); int mac_type, unsigned long *ramrod_flags);
int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
struct bnx2x_vlan_mac_obj *obj, bool set,
unsigned long *ramrod_flags);
/** /**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
* *
...@@ -2486,6 +2498,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, ...@@ -2486,6 +2498,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define VF_ACQUIRE_THRESH 3 #define VF_ACQUIRE_THRESH 3
#define VF_ACQUIRE_MAC_FILTERS 1 #define VF_ACQUIRE_MAC_FILTERS 1
#define VF_ACQUIRE_MC_FILTERS 10 #define VF_ACQUIRE_MC_FILTERS 10
#define VF_ACQUIRE_VLAN_FILTERS 2 /* VLAN0 + 'real' VLAN */
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \ #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR))) (!((me_reg) & ME_REG_VF_ERR)))
...@@ -2596,4 +2609,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb); ...@@ -2596,4 +2609,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
#define BNX2X_MAX_PHC_DRIFT 31000000 #define BNX2X_MAX_PHC_DRIFT 31000000
#define BNX2X_PTP_TX_TIMEOUT #define BNX2X_PTP_TX_TIMEOUT
/* Re-configure all previously configured vlan filters.
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
#endif /* bnx2x.h */ #endif /* bnx2x.h */
...@@ -2848,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) ...@@ -2848,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */ /* Start fast path */
/* Re-configure vlan filters */
rc = bnx2x_vlan_reconfigure_vid(bp);
if (rc)
LOAD_ERROR_EXIT(bp, load_error3);
/* Initialize Rx filter. */ /* Initialize Rx filter. */
bnx2x_set_rx_mode_inner(bp); bnx2x_set_rx_mode_inner(bp);
......
...@@ -1066,6 +1066,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, ...@@ -1066,6 +1066,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
BNX2X_FILTER_MAC_PENDING, BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type, &bp->sp_state, obj_type,
&bp->macs_pool); &bp->macs_pool);
if (!CHIP_IS_E1x(bp))
bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
fp->cl_id, fp->cid, BP_FUNC(bp),
bnx2x_sp(bp, vlan_rdata),
bnx2x_sp_mapping(bp, vlan_rdata),
BNX2X_FILTER_VLAN_PENDING,
&bp->sp_state, obj_type,
&bp->vlans_pool);
} }
/** /**
...@@ -1125,7 +1134,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp) ...@@ -1125,7 +1134,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp), bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp)); bnx2x_get_path_func_num(bp));
bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1, bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp)); bnx2x_get_path_func_num(bp));
/* RSS configuration object */ /* RSS configuration object */
...@@ -1135,6 +1144,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp) ...@@ -1135,6 +1144,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_sp_mapping(bp, rss_rdata), bnx2x_sp_mapping(bp, rss_rdata),
BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state, BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
BNX2X_OBJ_TYPE_RX); BNX2X_OBJ_TYPE_RX);
bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
} }
static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp) static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
......
...@@ -3067,7 +3067,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) ...@@ -3067,7 +3067,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
storm_memset_func_en(bp, p->func_id, 1); storm_memset_func_en(bp, p->func_id, 1);
/* spq */ /* spq */
if (p->func_flgs & FUNC_FLG_SPQ) { if (p->spq_active) {
storm_memset_spq_addr(bp, p->spq_map, p->func_id); storm_memset_spq_addr(bp, p->spq_map, p->func_id);
REG_WR(bp, XSEM_REG_FAST_MEMORY + REG_WR(bp, XSEM_REG_FAST_MEMORY +
XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
...@@ -3283,7 +3283,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) ...@@ -3283,7 +3283,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
{ {
struct bnx2x_func_init_params func_init = {0}; struct bnx2x_func_init_params func_init = {0};
struct event_ring_data eq_data = { {0} }; struct event_ring_data eq_data = { {0} };
u16 flags;
if (!CHIP_IS_E1x(bp)) { if (!CHIP_IS_E1x(bp)) {
/* reset IGU PF statistics: MSIX + ATTN */ /* reset IGU PF statistics: MSIX + ATTN */
...@@ -3300,15 +3299,7 @@ static void bnx2x_pf_init(struct bnx2x *bp) ...@@ -3300,15 +3299,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
BP_FUNC(bp) : BP_VN(bp))*4, 0); BP_FUNC(bp) : BP_VN(bp))*4, 0);
} }
/* function setup flags */ func_init.spq_active = true;
flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
/* This flag is relevant for E1x only.
* E2 doesn't have a TPA configuration in a function level.
*/
flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp); func_init.pf_id = BP_FUNC(bp);
func_init.func_id = BP_FUNC(bp); func_init.func_id = BP_FUNC(bp);
func_init.spq_map = bp->spq_mapping; func_init.spq_map = bp->spq_mapping;
...@@ -5303,6 +5294,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, ...@@ -5303,6 +5294,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
else else
vlan_mac_obj = &bp->sp_objs[cid].mac_obj; vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
case BNX2X_FILTER_VLAN_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
break; break;
case BNX2X_FILTER_MCAST_PENDING: case BNX2X_FILTER_MCAST_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
...@@ -5617,7 +5612,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) ...@@ -5617,7 +5612,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
BNX2X_STATE_DIAG): BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT): BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
bnx2x_handle_classification_eqe(bp, elem); bnx2x_handle_classification_eqe(bp, elem);
break; break;
...@@ -6205,6 +6200,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, ...@@ -6205,6 +6200,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
if (bp->accept_any_vlan) {
__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
break; break;
case BNX2X_RX_MODE_ALLMULTI: case BNX2X_RX_MODE_ALLMULTI:
__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags); __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
...@@ -6216,6 +6216,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, ...@@ -6216,6 +6216,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
if (bp->accept_any_vlan) {
__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
break; break;
case BNX2X_RX_MODE_PROMISC: case BNX2X_RX_MODE_PROMISC:
/* According to definition of SI mode, iface in promisc mode /* According to definition of SI mode, iface in promisc mode
...@@ -6236,18 +6241,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, ...@@ -6236,18 +6241,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
else else
__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags); __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
break; break;
default: default:
BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode); BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
return -EINVAL; return -EINVAL;
} }
/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
if (rx_mode != BNX2X_RX_MODE_NONE) {
__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
return 0; return 0;
} }
...@@ -8441,6 +8443,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, ...@@ -8441,6 +8443,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
return rc; return rc;
} }
int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
struct bnx2x_vlan_mac_obj *obj, bool set,
unsigned long *ramrod_flags)
{
int rc;
struct bnx2x_vlan_mac_ramrod_params ramrod_param;
memset(&ramrod_param, 0, sizeof(ramrod_param));
/* Fill general parameters */
ramrod_param.vlan_mac_obj = obj;
ramrod_param.ramrod_flags = *ramrod_flags;
/* Fill a user request section if needed */
if (!test_bit(RAMROD_CONT, ramrod_flags)) {
ramrod_param.user_req.u.vlan.vlan = vlan;
/* Set the command: ADD or DEL */
if (set)
ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
else
ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
}
rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
if (rc == -EEXIST) {
/* Do not treat adding same vlan as error. */
DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
rc = 0;
} else if (rc < 0) {
BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
}
return rc;
}
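
Callers are expected to request ramrod completion via the flags; this mirrors how the helper is driven later in this patch (__bnx2x_vlan_configure_vid) and is shown here only as a minimal usage sketch:

    unsigned long ramrod_flags = 0;
    int rc;

    __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
    rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
                            true /* add */, &ramrod_flags);
    if (rc)
            BNX2X_ERR("Unable to configure VLAN %d\n", vid);
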
int bnx2x_del_all_macs(struct bnx2x *bp, int bnx2x_del_all_macs(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj, struct bnx2x_vlan_mac_obj *mac_obj,
int mac_type, bool wait_for_comp) int mac_type, bool wait_for_comp)
...@@ -12140,6 +12178,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) ...@@ -12140,6 +12178,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->drv_info_mutex); mutex_init(&bp->drv_info_mutex);
sema_init(&bp->stats_lock, 1); sema_init(&bp->stats_lock, 1);
bp->drv_info_mng_owner = false; bp->drv_info_mng_owner = false;
INIT_LIST_HEAD(&bp->vlan_reg);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
...@@ -12658,6 +12697,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb, ...@@ -12658,6 +12697,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
return vxlan_features_check(skb, features); return vxlan_features_check(skb, features);
} }
static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
{
int rc;
if (IS_PF(bp)) {
unsigned long ramrod_flags = 0;
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
add, &ramrod_flags);
} else {
rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
}
return rc;
}
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
if (!bp->vlan_cnt) {
DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
return 0;
}
list_for_each_entry(vlan, &bp->vlan_reg, link) {
/* Prepare for cleanup in case of errors */
if (rc) {
vlan->hw = false;
continue;
}
if (!vlan->hw)
continue;
DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
vlan->hw = false;
rc = -EINVAL;
continue;
}
}
return rc;
}
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
bool hw = false;
int rc = 0;
if (!netif_running(bp->dev)) {
DP(NETIF_MSG_IFUP,
"Ignoring VLAN configuration the interface is down\n");
return -EFAULT;
}
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return -ENOMEM;
bp->vlan_cnt++;
if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
bp->accept_any_vlan = true;
if (IS_PF(bp))
bnx2x_set_rx_mode_inner(bp);
else
bnx2x_vfpf_storm_rx_mode(bp);
} else if (bp->vlan_cnt <= bp->vlan_credit) {
rc = __bnx2x_vlan_configure_vid(bp, vid, true);
hw = true;
}
vlan->vid = vid;
vlan->hw = hw;
if (!rc) {
list_add(&vlan->link, &bp->vlan_reg);
} else {
bp->vlan_cnt--;
kfree(vlan);
}
DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
return rc;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
int rc = 0;
if (!netif_running(bp->dev)) {
DP(NETIF_MSG_IFUP,
"Ignoring VLAN configuration the interface is down\n");
return -EFAULT;
}
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
if (!bp->vlan_cnt) {
BNX2X_ERR("Unable to kill VLAN %d\n", vid);
return -EINVAL;
}
list_for_each_entry(vlan, &bp->vlan_reg, link)
if (vlan->vid == vid)
break;
if (vlan->vid != vid) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
if (vlan->hw)
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
list_del(&vlan->link);
kfree(vlan);
bp->vlan_cnt--;
if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
/* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
if (vlan->hw)
continue;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
BNX2X_ERR("Unable to config VLAN %d\n",
vlan->vid);
continue;
}
DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
vlan->vid);
vlan->hw = true;
}
DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
bp->accept_any_vlan = false;
if (IS_PF(bp))
bnx2x_set_rx_mode_inner(bp);
else
bnx2x_vfpf_storm_rx_mode(bp);
}
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
return rc;
}
static const struct net_device_ops bnx2x_netdev_ops = { static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open, .ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close, .ndo_stop = bnx2x_close,
...@@ -12671,6 +12873,8 @@ static const struct net_device_ops bnx2x_netdev_ops = { ...@@ -12671,6 +12873,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fix_features = bnx2x_fix_features, .ndo_fix_features = bnx2x_fix_features,
.ndo_set_features = bnx2x_set_features, .ndo_set_features = bnx2x_set_features,
.ndo_tx_timeout = bnx2x_tx_timeout, .ndo_tx_timeout = bnx2x_tx_timeout,
.ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x, .ndo_poll_controller = poll_bnx2x,
#endif #endif
...@@ -12881,6 +13085,16 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, ...@@ -12881,6 +13085,16 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
/* VF with OLD Hypervisor or old PF do not support filtering */
if (IS_PF(bp)) {
if (CHIP_IS_E1x(bp))
bp->accept_any_vlan = true;
else
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HIGHDMA;
......
...@@ -357,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) ...@@ -357,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->get(vp, 1); return vp->get(vp, 1);
} }
static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
if (!mp->get(mp, 1))
return false;
if (!vp->get(vp, 1)) {
mp->put(mp, 1);
return false;
}
return true;
}
static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{ {
struct bnx2x_credit_pool_obj *mp = o->macs_pool; struct bnx2x_credit_pool_obj *mp = o->macs_pool;
...@@ -385,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) ...@@ -385,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
return vp->put(vp, 1); return vp->put(vp, 1);
} }
static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
struct bnx2x_credit_pool_obj *mp = o->macs_pool;
struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
if (!mp->put(mp, 1))
return false;
if (!vp->put(vp, 1)) {
mp->get(mp, 1);
return false;
}
return true;
}
/** /**
* __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
* *
...@@ -638,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp, ...@@ -638,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
return 0; return 0;
} }
static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
data->vlan_mac.mac, data->vlan_mac.vlan);
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
ETH_ALEN)) &&
(data->vlan_mac.is_inner_mac ==
pos->u.vlan_mac.is_inner_mac))
return -EEXIST;
return 0;
}
/* check_del() callbacks */ /* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem * static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_mac_del(struct bnx2x *bp, bnx2x_check_mac_del(struct bnx2x *bp,
...@@ -672,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem * ...@@ -672,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem *
return NULL; return NULL;
} }
static struct bnx2x_vlan_mac_registry_elem *
bnx2x_check_vlan_mac_del(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
data->vlan_mac.mac, data->vlan_mac.vlan);
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
ETH_ALEN)) &&
(data->vlan_mac.is_inner_mac ==
pos->u.vlan_mac.is_inner_mac))
return pos;
return NULL;
}
/* check_move() callback */ /* check_move() callback */
static bool bnx2x_check_move(struct bnx2x *bp, static bool bnx2x_check_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o, struct bnx2x_vlan_mac_obj *src_o,
...@@ -1038,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, ...@@ -1038,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
rule_cnt); rule_cnt);
} }
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
struct bnx2x_exeq_elem *elem,
int rule_idx, int cam_offset)
{
struct bnx2x_raw_obj *raw = &o->raw;
struct eth_classify_rules_ramrod_data *data =
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
u16 inner_mac;
/* Reset the ramrod data buffer for the first rule */
if (rule_idx == 0)
memset(data, 0, sizeof(*data));
/* Set a rule header */
bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
&rule_entry->pair.header);
/* Set VLAN and MAC themselves */
rule_entry->pair.vlan = cpu_to_le16(vlan);
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
/* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
struct bnx2x_vlan_mac_obj *target_obj;
rule_entry++;
rule_cnt++;
/* Setup ramrod data */
target_obj = elem->cmd_data.vlan_mac.target_obj;
bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
true, CLASSIFY_RULE_OPCODE_PAIR,
&rule_entry->pair.header);
/* Set a VLAN itself */
rule_entry->pair.vlan = cpu_to_le16(vlan);
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
}
/* Set the ramrod data header */
bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
rule_cnt);
}
/**
* bnx2x_set_one_vlan_mac_e1h -
*
* @bp: device handle
* @o: bnx2x_vlan_mac_obj
* @elem: bnx2x_exeq_elem
* @rule_idx: rule_idx
* @cam_offset: cam_offset
*/
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
struct bnx2x_exeq_elem *elem,
int rule_idx, int cam_offset)
{
struct bnx2x_raw_obj *raw = &o->raw;
struct mac_configuration_cmd *config =
(struct mac_configuration_cmd *)(raw->rdata);
/* 57710 and 57711 do not support MOVE command,
* so it's either ADD or DEL
*/
bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
true : false;
/* Reset the ramrod data buffer */
memset(config, 0, sizeof(*config));
bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
cam_offset, add,
elem->cmd_data.vlan_mac.u.vlan_mac.mac,
elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
ETH_VLAN_FILTER_CLASSIFY, config);
}
/** /**
* bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
* *
...@@ -1137,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( ...@@ -1137,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
return NULL; return NULL;
} }
static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
struct bnx2x_exe_queue_obj *o,
struct bnx2x_exeq_elem *elem)
{
struct bnx2x_exeq_elem *pos;
struct bnx2x_vlan_mac_ramrod_data *data =
&elem->cmd_data.vlan_mac.u.vlan_mac;
/* Check pending for execution commands */
list_for_each_entry(pos, &o->exe_queue, link)
if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
sizeof(*data)) &&
(pos->cmd_data.vlan_mac.cmd ==
elem->cmd_data.vlan_mac.cmd))
return pos;
return NULL;
}
/** /**
* bnx2x_validate_vlan_mac_add - check if an ADD command can be executed * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
* *
...@@ -2044,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, ...@@ -2044,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
} }
} }
void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *vlan_mac_obj,
u8 cl_id, u32 cid, u8 func_id, void *rdata,
dma_addr_t rdata_mapping, int state,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *macs_pool,
struct bnx2x_credit_pool_obj *vlans_pool)
{
union bnx2x_qable_obj *qable_obj =
(union bnx2x_qable_obj *)vlan_mac_obj;
bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
rdata_mapping, state, pstate, type,
macs_pool, vlans_pool);
/* CAM pool handling */
vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
/* CAM offset is relevant for 57710 and 57711 chips only which have a
* single CAM for both MACs and VLAN-MAC pairs. So the offset
* will be taken from MACs' pool object only.
*/
vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
if (CHIP_IS_E1(bp)) {
BNX2X_ERR("Do not support chips others than E2\n");
BUG();
} else if (CHIP_IS_E1H(bp)) {
vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
vlan_mac_obj->check_move = bnx2x_check_move_always_err;
vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
&vlan_mac_obj->exe_queue, 1, qable_obj,
bnx2x_validate_vlan_mac,
bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_vlan_mac);
} else {
vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
vlan_mac_obj->check_move = bnx2x_check_move;
vlan_mac_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
&vlan_mac_obj->exe_queue,
CLASSIFY_RULES_COUNT,
qable_obj, bnx2x_validate_vlan_mac,
bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_vlan_mac);
}
}
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp, static inline void __storm_memset_mac_filters(struct bnx2x *bp,
struct tstorm_eth_mac_filter_config *mac_filters, struct tstorm_eth_mac_filter_config *mac_filters,
...@@ -3856,7 +4101,7 @@ static bool bnx2x_credit_pool_get_entry_always_true( ...@@ -3856,7 +4101,7 @@ static bool bnx2x_credit_pool_get_entry_always_true(
* If credit is negative pool operations will always succeed (unlimited pool). * If credit is negative pool operations will always succeed (unlimited pool).
* *
*/ */
static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p, void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
int base, int credit) int base, int credit)
{ {
/* Zero the object first */ /* Zero the object first */
...@@ -3936,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, ...@@ -3936,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
/* CAM credit is equaly divided between all active functions /* CAM credit is equaly divided between all active functions
* on the PATH. * on the PATH.
*/ */
if ((func_num > 0)) { if (func_num > 0) {
if (!CHIP_REV_IS_SLOW(bp)) if (!CHIP_REV_IS_SLOW(bp))
cam_sz = (MAX_MAC_CREDIT_E2 / func_num); cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
else else
cam_sz = BNX2X_CAM_SIZE_EMUL; cam_sz = BNX2X_CAM_SIZE_EMUL;
...@@ -3968,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, ...@@ -3968,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
* on the PATH. * on the PATH.
*/ */
if (func_num > 0) { if (func_num > 0) {
int credit = MAX_VLAN_CREDIT_E2 / func_num; int credit = PF_VLAN_CREDIT_E2(bp, func_num);
bnx2x_init_credit_pool(p, func_id * credit, credit);
bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
} else } else
/* this should never happen! Block VLAN operations. */ /* this should never happen! Block VLAN operations. */
bnx2x_init_credit_pool(p, 0, 0); bnx2x_init_credit_pool(p, 0, 0);
......
...@@ -1413,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp, ...@@ -1413,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
unsigned long *pstate, bnx2x_obj_type type, unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *vlans_pool); struct bnx2x_credit_pool_obj *vlans_pool);
void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *vlan_mac_obj,
u8 cl_id, u32 cid, u8 func_id, void *rdata,
dma_addr_t rdata_mapping, int state,
unsigned long *pstate, bnx2x_obj_type type,
struct bnx2x_credit_pool_obj *macs_pool,
struct bnx2x_credit_pool_obj *vlans_pool);
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp, int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o); struct bnx2x_vlan_mac_obj *o);
void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp, void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
...@@ -1483,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp, ...@@ -1483,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
void bnx2x_init_vlan_credit_pool(struct bnx2x *bp, void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *p, u8 func_id, struct bnx2x_credit_pool_obj *p, u8 func_id,
u8 func_num); u8 func_num);
void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
int base, int credit);
/****************** RSS CONFIGURATION ****************/ /****************** RSS CONFIGURATION ****************/
void bnx2x_init_rss_config_obj(struct bnx2x *bp, void bnx2x_init_rss_config_obj(struct bnx2x *bp,
...@@ -1510,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp, ...@@ -1510,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table); u8 *ind_table);
#define PF_MAC_CREDIT_E2(bp, func_num) \
((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
#define PF_VLAN_CREDIT_E2(bp, func_num) \
((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
#endif /* BNX2X_SP_VERBS */ #endif /* BNX2X_SP_VERBS */
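
For a rough feel of the numbers, suppose (purely for illustration) the per-path VLAN credit ceiling MAX_VLAN_CREDIT_E2 were 272, with 64 potential VFs per path each reserving VF_VLAN_CREDIT_CNT = 2 credits, 4 PFs on the path and 16 VFs under this PF:

    PF_VLAN_CREDIT_E2 = (272 - 64 * 2) / 4 + 16 * 2
                      = 144 / 4 + 32
                      = 68 vlan filter credits for this PF and its VFs
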
...@@ -197,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, ...@@ -197,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
setup_p->gen_params.stat_id = vfq_stat_id(vf, q); setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
setup_p->gen_params.fp_hsi = vf->fp_hsi; setup_p->gen_params.fp_hsi = vf->fp_hsi;
/* Setup-op pause params:
* Nothing to do, the pause thresholds are set by default to 0 which
* effectively turns off the feature for this queue. We don't want
* one queue (VF) to interfering with another queue (another VF)
*/
if (vf->cfg_flags & VF_CFG_FW_FC)
BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
vf->abs_vfid);
/* Setup-op flags: /* Setup-op flags:
* collect statistics, zero statistics, local-switching, security, * collect statistics, zero statistics, local-switching, security,
* OV for Flex10, RSS and MCAST for leading * OV for Flex10, RSS and MCAST for leading
...@@ -360,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, ...@@ -360,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
} }
static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
int qid, bool drv_only, bool mac) int qid, bool drv_only, int type)
{ {
struct bnx2x_vlan_mac_ramrod_params ramrod; struct bnx2x_vlan_mac_ramrod_params ramrod;
int rc; int rc;
DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
mac ? "MACs" : "VLANs"); (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
(type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
/* Prepare ramrod params */ /* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
if (mac) { if (type == BNX2X_VF_FILTER_VLAN_MAC) {
set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
} else if (type == BNX2X_VF_FILTER_MAC) {
set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
} else { } else {
set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
&ramrod.user_req.vlan_mac_flags);
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
} }
ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
...@@ -393,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -393,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
&ramrod.ramrod_flags); &ramrod.ramrod_flags);
if (rc) { if (rc) {
BNX2X_ERR("Failed to delete all %s\n", BNX2X_ERR("Failed to delete all %s\n",
mac ? "MACs" : "VLANs"); (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
(type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
return rc; return rc;
} }
/* Clear the vlan counters */
if (!mac)
atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
return 0; return 0;
} }
...@@ -414,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, ...@@ -414,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
vf->abs_vfid, filter->add ? "Adding" : "Deleting", vf->abs_vfid, filter->add ? "Adding" : "Deleting",
filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
(filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
/* Prepare ramrod params */ /* Prepare ramrod params */
memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
if (filter->type == BNX2X_VF_FILTER_VLAN) { if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
&ramrod.user_req.vlan_mac_flags); ramrod.user_req.u.vlan.vlan = filter->vid;
memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
ramrod.user_req.u.vlan.vlan = filter->vid; ramrod.user_req.u.vlan.vlan = filter->vid;
} else { } else {
...@@ -431,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, ...@@ -431,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
BNX2X_VLAN_MAC_DEL; BNX2X_VLAN_MAC_DEL;
/* Verify there are available vlan credits */
if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
(atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
vf_vlan_rules_cnt(vf))) {
BNX2X_ERR("No credits for vlan [%d >= %d]\n",
atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
vf_vlan_rules_cnt(vf));
return -ENOMEM;
}
set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
if (drv_only) if (drv_only)
set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
...@@ -452,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, ...@@ -452,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
if (rc && rc != -EEXIST) { if (rc && rc != -EEXIST) {
BNX2X_ERR("Failed to %s %s\n", BNX2X_ERR("Failed to %s %s\n",
filter->add ? "add" : "delete", filter->add ? "add" : "delete",
filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
"VLAN"); "VLAN-MAC" :
(filter->type == BNX2X_VF_FILTER_MAC) ?
"MAC" : "VLAN");
return rc; return rc;
} }
/* Update the vlan counters */
if (filter->type == BNX2X_VF_FILTER_VLAN)
bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
&bnx2x_vfq(vf, qid, vlan_count));
return 0; return 0;
} }
...@@ -513,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, ...@@ -513,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
if (rc) if (rc)
goto op_err; goto op_err;
/* Configure vlan0 for leading queue */
if (!qid) {
struct bnx2x_vf_mac_vlan_filter filter;
memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
filter.type = BNX2X_VF_FILTER_VLAN;
filter.add = true;
filter.vid = 0;
rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
if (rc)
goto op_err;
}
/* Schedule the configuration of any pending vlan filters */ /* Schedule the configuration of any pending vlan filters */
vf->cfg_flags |= VF_CFG_VLAN;
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_MSG_IOV); BNX2X_MSG_IOV);
return 0; return 0;
...@@ -546,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -546,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* If needed, clean the filtering data base */ /* If needed, clean the filtering data base */
if ((qid == LEADING_IDX) && if ((qid == LEADING_IDX) &&
bnx2x_validate_vf_sp_objs(bp, vf, false)) { bnx2x_validate_vf_sp_objs(bp, vf, false)) {
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
BNX2X_VF_FILTER_VLAN_MAC);
if (rc)
goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
BNX2X_VF_FILTER_VLAN);
if (rc) if (rc)
goto op_err; goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
BNX2X_VF_FILTER_MAC);
if (rc) if (rc)
goto op_err; goto op_err;
} }
...@@ -682,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) ...@@ -682,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
/* Remove filtering if feasible */ /* Remove filtering if feasible */
if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
false, false); false,
BNX2X_VF_FILTER_VLAN_MAC);
if (rc)
goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
false,
BNX2X_VF_FILTER_VLAN);
if (rc) if (rc)
goto op_err; goto op_err;
rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
false, true); false,
BNX2X_VF_FILTER_MAC);
if (rc) if (rc)
goto op_err; goto op_err;
rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
...@@ -767,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) ...@@ -767,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
if (vf->cfg_flags & VF_CFG_INT_SIMD)
val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK; val &= ~IGU_VF_CONF_PARENT_MASK;
val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
...@@ -847,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) ...@@ -847,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
return 0; return 0;
} }
static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
struct bnx2x_virtf *vf,
int new)
{
int num = vf_vlan_rules_cnt(vf);
int diff = new - num;
bool rc = true;
DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
vf->abs_vfid, new, num);
if (diff > 0)
rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
else if (diff < 0)
rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
if (rc)
vf_vlan_rules_cnt(vf) = new;
else
DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
vf->abs_vfid);
}
/* must be called after the number of PF queues and the number of VFs are /* must be called after the number of PF queues and the number of VFs are
* both known * both known
*/ */
...@@ -877,21 +833,13 @@ static void ...@@ -877,21 +833,13 @@ static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{ {
struct vf_pf_resc_request *resc = &vf->alloc_resc; struct vf_pf_resc_request *resc = &vf->alloc_resc;
u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */ /* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0; resc->num_rxqs = 0;
resc->num_txqs = 0; resc->num_txqs = 0;
/* no credit calculations for macs (just yet) */ resc->num_mac_filters = VF_MAC_CREDIT_CNT;
resc->num_mac_filters = 1; resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
/* divvy up vlan rules */
bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
vlan_count = 1 << ilog2(vlan_count);
bnx2x_iov_re_set_vlan_filters(bp, vf,
vlan_count / BNX2X_NR_VIRTFN(bp));
/* no real limitation */ /* no real limitation */
resc->num_mc_filters = 0; resc->num_mc_filters = 0;
...@@ -1625,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) ...@@ -1625,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
vf->filter_state = 0; vf->filter_state = 0;
vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
vf_vlan_rules_cnt(vf));
bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
vf_mac_rules_cnt(vf));
/* init mcast object - This object will be re-initialized /* init mcast object - This object will be re-initialized
* during VF-ACQUIRE with the proper cl_id and cid. * during VF-ACQUIRE with the proper cl_id and cid.
* It needs to be initialized here so that it can be safely * It needs to be initialized here so that it can be safely
...@@ -2037,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -2037,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
/* Save a vlan filter for the Hypervisor */
return ((req_resc->num_rxqs <= rxq_cnt) && return ((req_resc->num_rxqs <= rxq_cnt) &&
(req_resc->num_txqs <= txq_cnt) && (req_resc->num_txqs <= txq_cnt) &&
(req_resc->num_sbs <= vf_sb_count(vf)) && (req_resc->num_sbs <= vf_sb_count(vf)) &&
(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
} }
/* CORE VF API */ /* CORE VF API */
...@@ -2096,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -2096,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_sb_count(vf) = resc->num_sbs; vf_sb_count(vf) = resc->num_sbs;
vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
if (resc->num_mac_filters)
vf_mac_rules_cnt(vf) = resc->num_mac_filters;
/* Add an additional vlan filter credit for the hypervisor */
bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
DP(BNX2X_MSG_IOV, DP(BNX2X_MSG_IOV,
"Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
vf_sb_count(vf), vf_rxq_count(vf), vf_sb_count(vf), vf_rxq_count(vf),
vf_txq_count(vf), vf_mac_rules_cnt(vf), vf_txq_count(vf), vf_mac_rules_cnt(vf),
vf_vlan_rules_visible_cnt(vf)); vf_vlan_rules_cnt(vf));
/* Initialize the queues */ /* Initialize the queues */
if (!vf->vfqs) { if (!vf->vfqs) {
...@@ -2138,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -2138,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{ {
struct bnx2x_func_init_params func_init = {0}; struct bnx2x_func_init_params func_init = {0};
u16 flags = 0;
int i; int i;
/* the sb resources are initialized at this point, do the /* the sb resources are initialized at this point, do the
...@@ -2165,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) ...@@ -2165,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
/* reset IGU VF statistics: MSIX */ /* reset IGU VF statistics: MSIX */
REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
/* vf init */
if (vf->cfg_flags & VF_CFG_STATS)
flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
if (vf->cfg_flags & VF_CFG_TPA)
flags |= FUNC_FLG_TPA;
if (is_vf_multi(vf))
flags |= FUNC_FLG_RSS;
/* function setup */ /* function setup */
func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp); func_init.pf_id = BP_FUNC(bp);
func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
func_init.fw_stat_map = vf->fw_stat_map;
func_init.spq_map = vf->spq_map;
func_init.spq_prod = 0;
bnx2x_func_init(bp, &func_init); bnx2x_func_init(bp, &func_init);
/* Enable the vf */ /* Enable the vf */
...@@ -2595,7 +2528,7 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) ...@@ -2595,7 +2528,7 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
for_each_vf(bp, vfidx) { for_each_vf(bp, vfidx) {
bulletin = BP_VF_BULLETIN(bp, vfidx); bulletin = BP_VF_BULLETIN(bp, vfidx);
if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) if (bulletin->valid_bitmap & VLAN_VALID)
bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
} }
} }
...@@ -2813,20 +2746,58 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) ...@@ -2813,20 +2746,58 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
return rc; return rc;
} }
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
struct bnx2x_virtf *vf, bool accept)
{
struct bnx2x_rx_mode_ramrod_params rx_ramrod;
unsigned long accept_flags;
/* need to remove/add the VF's accept_any_vlan bit */
accept_flags = bnx2x_leading_vfq(vf, accept_flags);
if (accept)
set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
else
clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
accept_flags);
bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
bnx2x_config_rx_mode(bp, &rx_ramrod);
}
static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
u16 vlan, bool add)
{ {
struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_vlan_mac_ramrod_params ramrod_param; struct bnx2x_vlan_mac_ramrod_params ramrod_param;
struct bnx2x_queue_update_params *update_params; unsigned long ramrod_flags = 0;
int rc = 0;
/* configure the new vlan to device */
memset(&ramrod_param, 0, sizeof(ramrod_param));
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
ramrod_param.ramrod_flags = ramrod_flags;
ramrod_param.user_req.u.vlan.vlan = vlan;
ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
: BNX2X_VLAN_MAC_DEL;
rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
if (rc) {
BNX2X_ERR("failed to configure vlan\n");
return -EINVAL;
}
return 0;
}
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
struct pf_vf_bulletin_content *bulletin = NULL; struct pf_vf_bulletin_content *bulletin = NULL;
struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev); struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_mac_obj *vlan_obj; struct bnx2x_vlan_mac_obj *vlan_obj;
unsigned long vlan_mac_flags = 0; unsigned long vlan_mac_flags = 0;
unsigned long ramrod_flags = 0; unsigned long ramrod_flags = 0;
struct bnx2x_virtf *vf = NULL; struct bnx2x_virtf *vf = NULL;
unsigned long accept_flags; int i, rc;
int rc;
if (vlan > 4095) { if (vlan > 4095) {
BNX2X_ERR("illegal vlan value %d\n", vlan); BNX2X_ERR("illegal vlan value %d\n", vlan);
...@@ -2855,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) ...@@ -2855,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
bulletin->valid_bitmap &= ~(1 << VLAN_VALID); bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan; bulletin->vlan = vlan;
/* Post update on VF's bulletin board */
rc = bnx2x_post_vf_bulletin(bp, vfidx);
if (rc)
BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
mutex_unlock(&bp->vfdb->bulletin_mutex); mutex_unlock(&bp->vfdb->bulletin_mutex);
/* is vf initialized and queue set up? */ /* is vf initialized and queue set up? */
...@@ -2881,40 +2856,32 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) ...@@ -2881,40 +2856,32 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
goto out; goto out;
} }
/* need to remove/add the VF's accept_any_vlan bit */ /* clear accept_any_vlan when HV forces vlan, otherwise
accept_flags = bnx2x_leading_vfq(vf, accept_flags); * according to VF capabilities
if (vlan) */
clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
else bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
accept_flags);
bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
bnx2x_config_rx_mode(bp, &rx_ramrod);
/* configure the new vlan to device */ rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
memset(&ramrod_param, 0, sizeof(ramrod_param)); if (rc)
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
ramrod_param.vlan_mac_obj = vlan_obj;
ramrod_param.ramrod_flags = ramrod_flags;
set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
&ramrod_param.user_req.vlan_mac_flags);
ramrod_param.user_req.u.vlan.vlan = vlan;
ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
if (rc) {
BNX2X_ERR("failed to configure vlan\n");
rc = -EINVAL;
goto out; goto out;
}
/* send queue update ramrod to configure default vlan and silent /* send queue update ramrods to configure default vlan and
* vlan removal * silent vlan removal
*/ */
for_each_vfq(vf, i) {
struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_queue_update_params *update_params;
q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
/* validate the Q is UP */
if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
BNX2X_Q_LOGICAL_STATE_ACTIVE)
continue;
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
q_params.cmd = BNX2X_Q_CMD_UPDATE; q_params.cmd = BNX2X_Q_CMD_UPDATE;
q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
update_params = &q_params.params.update; update_params = &q_params.params.update;
__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
&update_params->update_flags); &update_params->update_flags);
...@@ -2946,19 +2913,19 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) ...@@ -2946,19 +2913,19 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* Update the Queue state */ /* Update the Queue state */
rc = bnx2x_queue_state_change(bp, &q_params); rc = bnx2x_queue_state_change(bp, &q_params);
if (rc) { if (rc) {
BNX2X_ERR("Failed to configure default VLAN\n"); BNX2X_ERR("Failed to configure default VLAN queue %d\n",
i);
goto out; goto out;
} }
}
/* clear the flag indicating that this VF needs its vlan
* (will only be set if the HV configured the Vlan before vf was
* up and we were called because the VF came up later
*/
out: out:
vf->cfg_flags &= ~VF_CFG_VLAN;
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
if (rc)
DP(BNX2X_MSG_IOV,
"updated VF[%d] vlan configuration (vlan = %d)\n",
vfidx, vlan);
return rc; return rc;
} }
......
...@@ -77,7 +77,10 @@ struct bnx2x_vf_queue { ...@@ -77,7 +77,10 @@ struct bnx2x_vf_queue {
/* VLANs object */ /* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj; struct bnx2x_vlan_mac_obj vlan_obj;
atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
/* VLAN-MACs object */
struct bnx2x_vlan_mac_obj vlan_mac_obj;
unsigned long accept_flags; /* last accept flags configured */ unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */ /* Queue Slow-path State object */
...@@ -105,8 +108,10 @@ struct bnx2x_virtf; ...@@ -105,8 +108,10 @@ struct bnx2x_virtf;
struct bnx2x_vf_mac_vlan_filter { struct bnx2x_vf_mac_vlan_filter {
int type; int type;
#define BNX2X_VF_FILTER_MAC 1 #define BNX2X_VF_FILTER_MAC BIT(0)
#define BNX2X_VF_FILTER_VLAN 2 #define BNX2X_VF_FILTER_VLAN BIT(1)
#define BNX2X_VF_FILTER_VLAN_MAC \
(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
bool add; bool add;
u8 *mac; u8 *mac;
...@@ -121,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters { ...@@ -121,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters {
/* vf context */ /* vf context */
struct bnx2x_virtf { struct bnx2x_virtf {
u16 cfg_flags; u16 cfg_flags;
#define VF_CFG_STATS 0x0001 #define VF_CFG_STATS_COALESCE 0x1
#define VF_CFG_FW_FC 0x0002 #define VF_CFG_EXT_BULLETIN 0x2
#define VF_CFG_TPA 0x0004 #define VF_CFG_VLAN_FILTER 0x4
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
#define VF_CFG_VLAN 0x0020
#define VF_CFG_STATS_COALESCE 0x0040
#define VF_CFG_EXT_BULLETIN 0x0080
u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO u8 link_cfg; /* IFLA_VF_LINK_STATE_AUTO
* IFLA_VF_LINK_STATE_ENABLE * IFLA_VF_LINK_STATE_ENABLE
* IFLA_VF_LINK_STATE_DISABLE * IFLA_VF_LINK_STATE_DISABLE
...@@ -142,9 +142,8 @@ struct bnx2x_virtf { ...@@ -142,9 +142,8 @@ struct bnx2x_virtf {
bool flr_clnup_stage; /* true during flr cleanup */ bool flr_clnup_stage; /* true during flr cleanup */
/* dma */ /* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ dma_addr_t fw_stat_map;
u16 stats_stride; u16 stats_stride;
dma_addr_t spq_map;
dma_addr_t bulletin_map; dma_addr_t bulletin_map;
/* Allocated resources counters. Before the VF is acquired, the /* Allocated resources counters. Before the VF is acquired, the
...@@ -165,8 +164,6 @@ struct bnx2x_virtf { ...@@ -165,8 +164,6 @@ struct bnx2x_virtf {
#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
/* Hide a single vlan filter credit for the hypervisor */
#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
u8 sb_count; /* actual number of SBs */ u8 sb_count; /* actual number of SBs */
u8 igu_base_id; /* base igu status block id */ u8 igu_base_id; /* base igu status block id */
...@@ -209,6 +206,9 @@ struct bnx2x_virtf { ...@@ -209,6 +206,9 @@ struct bnx2x_virtf {
enum channel_tlvs op_current; enum channel_tlvs op_current;
u8 fp_hsi; u8 fp_hsi;
struct bnx2x_credit_pool_obj vf_vlans_pool;
struct bnx2x_credit_pool_obj vf_macs_pool;
}; };
#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
...@@ -232,6 +232,12 @@ struct bnx2x_virtf { ...@@ -232,6 +232,12 @@ struct bnx2x_virtf {
#define FW_VF_HANDLE(abs_vfid) \ #define FW_VF_HANDLE(abs_vfid) \
(abs_vfid + FW_PF_MAX_HANDLE) (abs_vfid + FW_PF_MAX_HANDLE)
#define GET_NUM_VFS_PER_PATH(bp) 64 /* use max possible value */
#define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? (bp)->vfdb->sriov.total \
: 0)
#define VF_MAC_CREDIT_CNT 1
#define VF_VLAN_CREDIT_CNT 2 /* VLAN0 + 'real' VLAN */
/* locking and unlocking the channel mutex */ /* locking and unlocking the channel mutex */
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv); enum channel_tlvs tlv);
...@@ -275,6 +281,10 @@ struct bnx2x_vf_sp { ...@@ -275,6 +281,10 @@ struct bnx2x_vf_sp {
struct eth_classify_rules_ramrod_data e2; struct eth_classify_rules_ramrod_data e2;
} vlan_rdata; } vlan_rdata;
union {
struct eth_classify_rules_ramrod_data e2;
} vlan_mac_rdata;
union { union {
struct eth_filter_rules_ramrod_data e2; struct eth_filter_rules_ramrod_data e2;
} rx_mode_rdata; } rx_mode_rdata;
...@@ -538,6 +548,7 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx); ...@@ -538,6 +548,7 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state); int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
#else /* CONFIG_BNX2X_SRIOV */ #else /* CONFIG_BNX2X_SRIOV */
static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
...@@ -606,5 +617,7 @@ struct pf_vf_bulletin_content; ...@@ -606,5 +617,7 @@ struct pf_vf_bulletin_content;
static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin, static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
bool support_long) {} bool support_long) {}
static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */ #endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */ #endif /* bnx2x_sriov.h */
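With the filter type turned into a bitmask, one filter entry can now describe a MAC rule, a VLAN rule, or a paired VLAN-MAC rule, mirroring how bnx2x_vf_mbx_macvlan_list() ORs the bits together. A minimal standalone illustration (flag values copied from the defines above, everything else hypothetical):

	#include <stdio.h>

	#define BIT(n)				(1u << (n))
	#define BNX2X_VF_FILTER_MAC		BIT(0)
	#define BNX2X_VF_FILTER_VLAN		BIT(1)
	#define BNX2X_VF_FILTER_VLAN_MAC	\
		(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN)

	int main(void)
	{
		unsigned int type = 0;

		/* OR in each classifier the request carried, as the mailbox
		 * handler does when both VALID flags are present.
		 */
		type |= BNX2X_VF_FILTER_MAC;	/* DEST_MAC_VALID was set */
		type |= BNX2X_VF_FILTER_VLAN;	/* VLAN_TAG_VALID was set */

		printf("vlan-mac pair? %s\n",
		       (type & BNX2X_VF_FILTER_VLAN_MAC) == BNX2X_VF_FILTER_VLAN_MAC ?
		       "yes" : "no");
		return 0;
	}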
...@@ -247,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) ...@@ -247,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
req->resc_request.num_sbs = bp->igu_sb_cnt; req->resc_request.num_sbs = bp->igu_sb_cnt;
req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS; req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS; req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
/* pf 2 vf bulletin board address */ /* pf 2 vf bulletin board address */
req->bulletin_addr = bp->pf2vf_bulletin_mapping; req->bulletin_addr = bp->pf2vf_bulletin_mapping;
...@@ -257,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) ...@@ -257,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* Bulletin support for bulletin board with length > legacy length */ /* Bulletin support for bulletin board with length > legacy length */
req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN; req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
/* vlan filtering is supported */
req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
/* add list termination tlv */ /* add list termination tlv */
bnx2x_add_tlv(bp, req, bnx2x_add_tlv(bp, req,
...@@ -375,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) ...@@ -375,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver)); sizeof(bp->fw_ver));
...@@ -548,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -548,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
BNX2X_FILTER_MAC_PENDING, BNX2X_FILTER_MAC_PENDING,
&vf->filter_state, &vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX, BNX2X_OBJ_TYPE_RX_TX,
&bp->macs_pool); &vf->vf_macs_pool);
/* vlan */ /* vlan */
bnx2x_init_vlan_obj(bp, &q->vlan_obj, bnx2x_init_vlan_obj(bp, &q->vlan_obj,
cl_id, q->cid, func_id, cl_id, q->cid, func_id,
...@@ -557,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -557,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
BNX2X_FILTER_VLAN_PENDING, BNX2X_FILTER_VLAN_PENDING,
&vf->filter_state, &vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX, BNX2X_OBJ_TYPE_RX_TX,
&bp->vlans_pool); &vf->vf_vlans_pool);
/* vlan-mac */
bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
cl_id, q->cid, func_id,
bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
BNX2X_FILTER_VLAN_MAC_PENDING,
&vf->filter_state,
BNX2X_OBJ_TYPE_RX_TX,
&vf->vf_macs_pool,
&vf->vf_vlans_pool);
/* mcast */ /* mcast */
bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
q->cid, func_id, func_id, q->cid, func_id, func_id,
...@@ -725,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) ...@@ -725,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID; req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
if (set) if (set)
req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC; req->filters[0].flags |= VFPF_Q_FILTER_SET;
/* sample bulletin board for new mac */ /* sample bulletin board for new mac */
bnx2x_sample_bulletin(bp); bnx2x_sample_bulletin(bp);
...@@ -913,6 +927,67 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev) ...@@ -913,6 +927,67 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
return 0; return 0;
} }
/* request pf to add or remove a vlan for the vf */
int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
int rc = 0;
if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
return 0;
}
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
sizeof(*req));
req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
req->vf_qid = vf_qid;
req->n_mac_vlan_filters = 1;
req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
if (add)
req->filters[0].flags |= VFPF_Q_FILTER_SET;
/* sample bulletin board for hypervisor vlan */
bnx2x_sample_bulletin(bp);
if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
rc = -EINVAL;
goto out;
}
req->filters[0].vlan_tag = vid;
/* add list termination tlv */
bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* output tlvs list */
bnx2x_dp_tlv_list(bp, req);
/* send message to pf */
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
goto out;
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
vid);
rc = -EINVAL;
}
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
}
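A VF-side caller (for example the 802.1Q add/kill-vid handlers) would be expected to funnel into this helper roughly as below. This is a hedged sketch using only the bnx2x_vfpf_update_vlan() signature introduced above; the example_* wrappers and the use of queue 0 as the leading queue are assumptions, and the snippet is not compilable outside the driver tree:

	/* Hypothetical VF-side usage of bnx2x_vfpf_update_vlan() (sketch only). */
	static int example_vlan_rx_add_vid(struct bnx2x *bp, u16 vid)
	{
		/* queue 0 assumed to be the VF's leading queue; 'true' adds */
		return bnx2x_vfpf_update_vlan(bp, vid, 0, true);
	}

	static int example_vlan_rx_kill_vid(struct bnx2x *bp, u16 vid)
	{
		/* 'false' asks the PF to remove the vlan classification rule */
		return bnx2x_vfpf_update_vlan(bp, vid, 0, false);
	}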
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{ {
int mode = bp->rx_mode; int mode = bp->rx_mode;
...@@ -936,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) ...@@ -936,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
if (mode == BNX2X_RX_MODE_PROMISC)
req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
} }
if (bp->accept_any_vlan)
req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
req->vf_qid = 0; req->vf_qid = 0;
...@@ -1190,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -1190,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
PFVF_CAP_TPA | PFVF_CAP_TPA |
PFVF_CAP_TPA_UPDATE); PFVF_CAP_TPA_UPDATE |
PFVF_CAP_VLAN_FILTER);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver)); sizeof(resp->pfdev_info.fw_ver));
...@@ -1205,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -1205,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_max_queue_cnt(bp, vf); bnx2x_vf_max_queue_cnt(bp, vf);
resc->num_sbs = vf_sb_count(vf); resc->num_sbs = vf_sb_count(vf);
resc->num_mac_filters = vf_mac_rules_cnt(vf); resc->num_mac_filters = vf_mac_rules_cnt(vf);
resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf); resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
resc->num_mc_filters = 0; resc->num_mc_filters = 0;
if (status == PFVF_STATUS_SUCCESS) { if (status == PFVF_STATUS_SUCCESS) {
...@@ -1372,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -1372,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN; vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
} }
if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
vf->abs_vfid);
vf->cfg_flags |= VF_CFG_VLAN_FILTER;
} else {
vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
}
out: out:
/* response */ /* response */
bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc); bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
...@@ -1384,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -1384,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
int rc; int rc;
/* record ghost addresses from vf message */ /* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr; vf->fw_stat_map = init->stats_addr;
vf->stats_stride = init->stats_stride; vf->stats_stride = init->stats_stride;
rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
...@@ -1580,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, ...@@ -1580,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
if ((msg_filter->flags & type_flag) != type_flag) if ((msg_filter->flags & type_flag) != type_flag)
continue; continue;
if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) { memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
fl->filters[j].mac = msg_filter->mac; fl->filters[j].mac = msg_filter->mac;
fl->filters[j].type = BNX2X_VF_FILTER_MAC; fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
} else { }
if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
fl->filters[j].vid = msg_filter->vlan_tag; fl->filters[j].vid = msg_filter->vlan_tag;
fl->filters[j].type = BNX2X_VF_FILTER_VLAN; fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
} }
fl->filters[j].add = fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
true : false;
fl->count++; fl->count++;
j++;
} }
if (!fl->count) if (!fl->count)
kfree(fl); kfree(fl);
...@@ -1600,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, ...@@ -1600,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
return 0; return 0;
} }
static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
u32 flags)
{
int i, cnt = 0;
for (i = 0; i < filters->n_mac_vlan_filters; i++)
if ((filters->filters[i].flags & flags) == flags)
cnt++;
return cnt;
}
static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx, static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
struct vfpf_q_mac_vlan_filter *filter) struct vfpf_q_mac_vlan_filter *filter)
{ {
...@@ -1631,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl, ...@@ -1631,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID #define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID #define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
#define VFPF_VLAN_MAC_FILTER (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{ {
...@@ -1641,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) ...@@ -1641,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* check for any mac/vlan changes */ /* check for any mac/vlan changes */
if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
/* build mac list */
struct bnx2x_vf_mac_vlan_filters *fl = NULL; struct bnx2x_vf_mac_vlan_filters *fl = NULL;
/* build vlan-mac list */
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_MAC_FILTER); VFPF_VLAN_MAC_FILTER);
if (rc) if (rc)
goto op_err; goto op_err;
if (fl) { if (fl) {
/* set mac list */ /* set vlan-mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid, msg->vf_qid,
false); false);
...@@ -1659,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) ...@@ -1659,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err; goto op_err;
} }
/* build vlan list */ /* build mac list */
fl = NULL; fl = NULL;
rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
VFPF_VLAN_FILTER); VFPF_MAC_FILTER);
if (rc) if (rc)
goto op_err; goto op_err;
if (fl) { if (fl) {
/* set vlan list */ /* set mac list */
rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
msg->vf_qid, msg->vf_qid,
false); false);
if (rc) if (rc)
goto op_err; goto op_err;
} }
} }
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
...@@ -1689,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) ...@@ -1689,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept); __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
} }
/* A packet arriving the vf's mac should be accepted /* any_vlan is not configured if HV is forcing VLAN
* with any vlan, unless a vlan has already been * any_vlan is configured if
* configured. * 1. VF does not support vlan filtering
* OR
* 2. VF supports vlan filtering and explicitly requested it
*/ */
if (!(bulletin->valid_bitmap & (1 << VLAN_VALID))) if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
(!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */ /* set rx-mode */
...@@ -1729,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp, ...@@ -1729,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
* since queue was not set up. * since queue was not set up.
*/ */
if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) { if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
/* once a mac was set by ndo can only accept a single mac... */ struct vfpf_q_mac_vlan_filter *filter = NULL;
if (filters->n_mac_vlan_filters > 1) { int i;
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
vf->abs_vfid); for (i = 0; i < filters->n_mac_vlan_filters; i++) {
if (!(filters->filters[i].flags &
VFPF_Q_FILTER_DEST_MAC_VALID))
continue;
/* once a mac was set by ndo can only accept
* a single mac...
*/
if (filter) {
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
vf->abs_vfid,
filters->n_mac_vlan_filters);
rc = -EPERM; rc = -EPERM;
goto response; goto response;
} }
filter = &filters->filters[i];
}
/* ...and only the mac set by the ndo */ /* ...and only the mac set by the ndo */
if (filters->n_mac_vlan_filters == 1 && if (filter &&
!ether_addr_equal(filters->filters->mac, bulletin->mac)) { !ether_addr_equal(filter->mac, bulletin->mac)) {
BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n", BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
vf->abs_vfid); vf->abs_vfid);
...@@ -1761,19 +1882,16 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp, ...@@ -1761,19 +1882,16 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
/* if vlan was set by hypervisor we don't allow guest to config vlan */ /* if vlan was set by hypervisor we don't allow guest to config vlan */
if (bulletin->valid_bitmap & 1 << VLAN_VALID) { if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
int i;
/* search for vlan filters */ /* search for vlan filters */
for (i = 0; i < filters->n_mac_vlan_filters; i++) {
if (filters->filters[i].flags & if (bnx2x_vf_filters_contain(filters,
VFPF_Q_FILTER_VLAN_TAG_VALID) { VFPF_Q_FILTER_VLAN_TAG_VALID)) {
BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
vf->abs_vfid); vf->abs_vfid);
rc = -EPERM; rc = -EPERM;
goto response; goto response;
} }
} }
}
/* verify vf_qid */ /* verify vf_qid */
if (filters->vf_qid > vf_rxq_count(vf)) { if (filters->vf_qid > vf_rxq_count(vf)) {
......
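The accept-any-vlan decision the PF now takes in bnx2x_vf_mbx_qfilters() boils down to the rule spelled out in the new comment above; restated as a small standalone predicate (illustrative only, with the bulletin and capability tests abstracted into booleans):

	#include <stdbool.h>

	/* Sketch of the PF's accept_any_vlan decision (not the literal driver code). */
	static bool vf_accepts_any_vlan(bool hv_forced_vlan,
					bool vf_supports_vlan_filter,
					bool vf_requested_any_vlan)
	{
		/* never while the hypervisor forces a vlan on the VF */
		if (hv_forced_vlan)
			return false;

		/* legacy VFs without vlan filtering keep the old vlan-promisc
		 * behaviour; capable VFs get it only on explicit request.
		 */
		return !vf_supports_vlan_filter || vf_requested_any_vlan;
	}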
...@@ -70,6 +70,8 @@ struct hw_sb_info { ...@@ -70,6 +70,8 @@ struct hw_sb_info {
#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004 #define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 #define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
#define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020
#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) #define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
#define BULLETIN_CONTENT_LEGACY_SIZE (32) #define BULLETIN_CONTENT_LEGACY_SIZE (32)
#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ #define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */
...@@ -133,6 +135,7 @@ struct vfpf_acquire_tlv { ...@@ -133,6 +135,7 @@ struct vfpf_acquire_tlv {
u8 fp_hsi_ver; u8 fp_hsi_ver;
u8 caps; u8 caps;
#define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0) #define VF_CAP_SUPPORT_EXT_BULLETIN (1 << 0)
#define VF_CAP_SUPPORT_VLAN_FILTER (1 << 1)
} vfdev_info; } vfdev_info;
struct vf_pf_resc_request resc_request; struct vf_pf_resc_request resc_request;
...@@ -178,6 +181,8 @@ struct pfvf_acquire_resp_tlv { ...@@ -178,6 +181,8 @@ struct pfvf_acquire_resp_tlv {
#define PFVF_CAP_DHC 0x00000002 #define PFVF_CAP_DHC 0x00000002
#define PFVF_CAP_TPA 0x00000004 #define PFVF_CAP_TPA 0x00000004
#define PFVF_CAP_TPA_UPDATE 0x00000008 #define PFVF_CAP_TPA_UPDATE 0x00000008
#define PFVF_CAP_VLAN_FILTER 0x00000010
char fw_ver[32]; char fw_ver[32];
u16 db_size; u16 db_size;
u8 indices_per_sb; u8 indices_per_sb;
...@@ -294,7 +299,7 @@ struct vfpf_q_mac_vlan_filter { ...@@ -294,7 +299,7 @@ struct vfpf_q_mac_vlan_filter {
u32 flags; u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 #define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 #define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ #define VFPF_Q_FILTER_SET 0x100 /* set/clear */
u8 mac[ETH_ALEN]; u8 mac[ETH_ALEN];
u16 vlan_tag; u16 vlan_tag;
}; };
......
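Taken together, the new HSI bits implement a two-way capability handshake: the VF advertises VF_CAP_SUPPORT_VLAN_FILTER in its acquire request, the PF answers with PFVF_CAP_VLAN_FILTER, and only then are vlan filters sent with VFPF_Q_FILTER_VLAN_TAG_VALID plus VFPF_Q_FILTER_SET. A minimal standalone illustration of the negotiation (flag values copied from the header above; the helper name is an assumption):

	#include <stdbool.h>
	#include <stdio.h>

	#define VF_CAP_SUPPORT_VLAN_FILTER	(1 << 1)	/* VF -> PF acquire request */
	#define PFVF_CAP_VLAN_FILTER		0x00000010	/* PF -> VF acquire response */

	/* sketch: both sides must advertise support before vlan filters are sent */
	static bool vlan_filtering_negotiated(unsigned int vf_caps, unsigned int pf_caps)
	{
		return (vf_caps & VF_CAP_SUPPORT_VLAN_FILTER) &&
		       (pf_caps & PFVF_CAP_VLAN_FILTER);
	}

	int main(void)
	{
		printf("negotiated: %d\n",
		       vlan_filtering_negotiated(VF_CAP_SUPPORT_VLAN_FILTER,
						 PFVF_CAP_VLAN_FILTER));
		return 0;
	}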