Commit b9871bcf authored by Ariel Elior, committed by David S. Miller

bnx2x: VF RSS support - PF side

This patch adds support for Receive Side Scaling for queues of
Virtual Functions on the PF side. This includes handling requests
for multiple queues from VF drivers, configuring the HW for
multiple queues per VF, and supporting RSS configuration of those
queues.
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 53cf5275
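Before reading the diff: the new constants interact. With an 8-byte doorbell stride (BNX2X_DB_SHIFT = 3) and 16 CIDs per VF (BNX2X_VF_CID_WND = 4), each VF consumes 16 * 8 = 128 bytes of its 512-byte doorbell BAR. A minimal standalone sketch of that arithmetic, using only constants taken from this patch (the program itself is illustrative, not part of the driver):

#include <stdio.h>

/* Constants as defined by this patch in bnx2x.h */
#define BNX2X_DB_SHIFT     3                        /* 8-byte doorbell stride */
#define BNX2X_VF_CID_WND   4                        /* log2 of queues per VF */
#define BNX2X_CIDS_PER_VF  (1 << BNX2X_VF_CID_WND)  /* 16 CIDs per VF */
#define BNX2X_VF_BAR_SIZE  512                      /* doorbell BAR bytes per VF */

int main(void)
{
    /* Mirrors the compile-time check the patch adds:
     * 16 CIDs * 8 bytes = 128 bytes, which fits in the 512-byte VF BAR.
     */
    int db_bytes_per_vf = BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT);

    printf("doorbell bytes per VF: %d of %d available\n",
           db_bytes_per_vf, BNX2X_VF_BAR_SIZE);
    return db_bytes_per_vf <= BNX2X_VF_BAR_SIZE ? 0 : 1;
}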
@@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
 
 #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
-#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
 #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
 #error "Min DB doorbell stride is 8"
 #endif
-#define DPM_TRIGER_TYPE 0x40
 #define DOORBELL(bp, cid, val) \
     do { \
-        writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
-               DPM_TRIGER_TYPE); \
+        writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
     } while (0)
 
 /* TX CSUM helpers */
@@ -1100,13 +1098,27 @@ struct bnx2x_port {
 extern struct workqueue_struct *bnx2x_wq;
 
 #define BNX2X_MAX_NUM_OF_VFS 64
-#define BNX2X_VF_CID_WND 0
+#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
 #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
-#define BNX2X_CLIENTS_PER_VF 1
-#define BNX2X_FIRST_VF_CID 256
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
 #define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES 64
 #define BNX2X_VF_ID_INVALID 0xFF
 
+/* the number of VF CIDS multiplied by the amount of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE 512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
 /*
  * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
  * control by the number of fast-path status blocks supported by the
@@ -1650,10 +1662,10 @@ struct bnx2x {
     dma_addr_t fw_stats_data_mapping;
     int fw_stats_data_sz;
 
-    /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+    /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
      * context size we need 8 ILT entries.
      */
-#define ILT_MAX_L2_LINES 8
+#define ILT_MAX_L2_LINES 32
     struct hw_context context[ILT_MAX_L2_LINES];
 
     struct bnx2x_ilt *ilt;
@@ -1869,7 +1881,7 @@ extern int num_queues;
 #define FUNC_FLG_TPA 0x0008
 #define FUNC_FLG_SPQ 0x0010
 #define FUNC_FLG_LEADING 0x0020 /* PF only */
+#define FUNC_FLG_LEADING_STATS 0x0040
 
 struct bnx2x_func_init_params {
     /* dma */
     dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
......
@@ -4784,6 +4784,11 @@ int bnx2x_resume(struct pci_dev *pdev)
 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                               u32 cid)
 {
+    if (!cxt) {
+        BNX2X_ERR("bad context pointer %p\n", cxt);
+        return;
+    }
+
     /* ustorm cxt validation */
     cxt->ustorm_ag_context.cdu_usage =
         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
......
@@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
     bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
 
     bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
-    REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
     if (!CHIP_REV_IS_SLOW(bp))
         /* enable hw interrupt from doorbell Q */
         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
......
@@ -6335,6 +6335,7 @@
 #define PCI_ID_VAL2 0x438
 #define PCI_ID_VAL3 0x43c
 
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
 #define GRC_CONFIG_REG_PF_INIT_VF 0x624
 #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
 /* First VF_NUM for PF is encoded in this register.
......
@@ -4416,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
     rss_obj->config_rss = bnx2x_setup_rss;
 }
 
+int validate_vlan_mac(struct bnx2x *bp,
+                      struct bnx2x_vlan_mac_obj *vlan_mac)
+{
+    if (!vlan_mac->get_n_elements) {
+        BNX2X_ERR("vlan mac object was not intialized\n");
+        return -EINVAL;
+    }
+    return 0;
+}
+
 /********************** Queue state object ***********************************/
 
 /**
......
@@ -1407,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
                              u8 *ind_table);
 
+int validate_vlan_mac(struct bnx2x *bp,
+                      struct bnx2x_vlan_mac_obj *vlan_mac);
 #endif /* BNX2X_SP_VERBS */
@@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state {
     BNX2X_VFOP_QTEARDOWN_DONE
 };
 
+enum bnx2x_vfop_rss_state {
+    BNX2X_VFOP_RSS_CONFIG,
+    BNX2X_VFOP_RSS_DONE
+};
+
 #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
 
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
     __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
     __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 
-    if (vfq_is_leading(q)) {
-        __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
-        __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
-    }
-
     /* Setup-op rx parameters */
     if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
         struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

@@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
             BNX2X_Q_LOGICAL_STATE_STOPPED) {
             DP(BNX2X_MSG_IOV,
                "Entered qdtor but queue was already stopped. Aborting gracefully\n");
-            goto op_done;
+
+            /* next state */
+            vfop->state = BNX2X_VFOP_QDTOR_DONE;
+            bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
         }
 
         /* next state */

@@ -432,8 +436,10 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
 op_done:
     case BNX2X_VFOP_QDTOR_DONE:
         /* invalidate the context */
-        qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
-        qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+        if (qdtor->cxt) {
+            qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+            qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+        }
         bnx2x_vfop_end(bp, vf, vfop);
         return;
     default:

@@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
         return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
                                      cmd->block);
     }
-    DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+    DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
+       vf->abs_vfid, vfop->rc);
     return -ENOMEM;
 }

@@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 {
     struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
     if (vf) {
+        /* the first igu entry belonging to VFs of this PF */
+        if (!BP_VFDB(bp)->first_vf_igu_entry)
+            BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+        /* the first igu entry belonging to this VF */
         if (!vf_sb_count(vf))
             vf->igu_base_id = igu_sb_id;
+
         ++vf_sb_count(vf);
+        ++vf->sb_count;
     }
+    BP_VFDB(bp)->vf_sbs_pool++;
 }
 
 /* VFOP MAC/VLAN helpers */
@@ -733,6 +748,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
                                      int qid, bool drv_only)
 {
     struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+    int rc;
 
     if (vfop) {
         struct bnx2x_vfop_args_filters filters = {

@@ -752,6 +768,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
         bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
         /* set object */
+        rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+        if (rc)
+            return rc;
         ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
         /* set extra args */

@@ -772,6 +791,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
                             int qid, bool drv_only)
 {
     struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+    int rc;
 
     if (vfop) {
         struct bnx2x_vfop_args_filters filters = {

@@ -794,6 +814,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
         bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
 
         /* set object */
+        rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+        if (rc)
+            return rc;
         ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 
         /* set extra args */

@@ -814,6 +837,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
                             int qid, u16 vid, bool add)
 {
     struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+    int rc;
 
     if (vfop) {
         struct bnx2x_vfop_args_filters filters = {

@@ -834,6 +858,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
         ramrod->user_req.u.vlan.vlan = vid;
 
         /* set object */
+        rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+        if (rc)
+            return rc;
         ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
         /* set extra args */

@@ -853,6 +880,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
                                       int qid, bool drv_only)
 {
     struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+    int rc;
 
     if (vfop) {
         struct bnx2x_vfop_args_filters filters = {

@@ -872,6 +900,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
         bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
         /* set object */
+        rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+        if (rc)
+            return rc;
         ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
         /* set extra args */

@@ -892,6 +923,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
                              int qid, bool drv_only)
 {
     struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+    int rc;
 
     if (vfop) {
         struct bnx2x_vfop_args_filters filters = {

@@ -911,6 +943,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
         bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
 
         /* set object */
+        rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+        if (rc)
+            return rc;
         ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 
         /* set extra args */
@@ -1021,21 +1056,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
     case BNX2X_VFOP_QFLR_CLR_VLAN:
         /* vlan-clear-all: driver-only, don't consume credit */
         vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-        vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+        if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
+            vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
+                                                  true);
         if (vfop->rc)
             goto op_err;
-        return;
+        bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
     case BNX2X_VFOP_QFLR_CLR_MAC:
         /* mac-clear-all: driver only consume credit */
         vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
-        vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+        if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
+            vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
+                                                 true);
         DP(BNX2X_MSG_IOV,
            "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
            vf->abs_vfid, vfop->rc);
         if (vfop->rc)
             goto op_err;
-        return;
+        bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 
     case BNX2X_VFOP_QFLR_TERMINATE:
         qstate = &vfop->op_p->qctor.qstate;
@@ -1332,10 +1371,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
 {
     struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
+    /* for non leading queues skip directly to qdown sate */
     if (vfop) {
         vfop->args.qx.qid = qid;
-        bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
-                         bnx2x_vfop_qdown, cmd->done);
+        bnx2x_vfop_opset(qid == LEADING_IDX ?
+                         BNX2X_VFOP_QTEARDOWN_RXMODE :
+                         BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
+                         cmd->done);
         return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
                                      cmd->block);
     }
@@ -1488,15 +1530,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
  * both known
  */
 static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
+    struct vf_pf_resc_request *resc = &vf->alloc_resc;
     u16 vlan_count = 0;
 
     /* will be set only during VF-ACQUIRE */
     resc->num_rxqs = 0;
     resc->num_txqs = 0;
 
-    /* no credit calculcis for macs (just yet) */
+    /* no credit calculations for macs (just yet) */
     resc->num_mac_filters = 1;
 
     /* divvy up vlan rules */

@@ -1508,13 +1551,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
     resc->num_mc_filters = 0;
 
     /* num_sbs already set */
+    resc->num_sbs = vf->sb_count;
 }
 
 /* FLR routines: */
 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
     /* reset the state variables */
-    bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+    bnx2x_iov_static_resc(bp, vf);
     vf->state = VF_FREE;
 }
@@ -1734,8 +1778,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
     /* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
      * the Pf doorbell size although the 2 are independent.
      */
-    REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
-           BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+    REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
 
     /* No security checks for now -
      * configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1802,7 +1845,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
 {
     int sb_id;
     u32 val;
-    u8 fid;
+    u8 fid, current_pf = 0;
 
     /* IGU in normal mode - read CAM */
     for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {

@@ -1810,16 +1853,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
         if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
             continue;
         fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
-        if (!(fid & IGU_FID_ENCODE_IS_PF))
+        if (fid & IGU_FID_ENCODE_IS_PF)
+            current_pf = fid & IGU_FID_PF_NUM_MASK;
+        else if (current_pf == BP_ABS_FUNC(bp))
             bnx2x_vf_set_igu_info(bp, sb_id,
                                   (fid & IGU_FID_VF_NUM_MASK));
         DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
            ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
            ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
             (fid & IGU_FID_VF_NUM_MASK)), sb_id,
            GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
     }
+    DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
 }
 
 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
...@@ -1885,23 +1930,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) ...@@ -1885,23 +1930,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
return 0; return 0;
} }
static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
int i;
u8 queue_count = 0;
if (IS_SRIOV(bp))
for_each_vf(bp, i)
queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
return queue_count;
}
/* must be called after PF bars are mapped */ /* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
int num_vfs_param) int num_vfs_param)
{ {
int err, i, qcount; int err, i;
struct bnx2x_sriov *iov; struct bnx2x_sriov *iov;
struct pci_dev *dev = bp->pdev; struct pci_dev *dev = bp->pdev;
@@ -1999,12 +2032,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
     /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
     bnx2x_get_vf_igu_cam_info(bp);
 
-    /* get the total queue count and allocate the global queue arrays */
-    qcount = bnx2x_iov_get_max_queue_count(bp);
-
     /* allocate the queue arrays for all VFs */
-    bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
-                             GFP_KERNEL);
+    bp->vfdb->vfqs = kzalloc(
+        BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+        GFP_KERNEL);
+
+    DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
     if (!bp->vfdb->vfqs) {
         BNX2X_ERR("failed to allocate vf queue array\n");
         err = -ENOMEM;
@@ -2125,49 +2159,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                            q_type);
 
     DP(BNX2X_MSG_IOV,
-       "initialized vf %d's queue object. func id set to %d\n",
-       vf->abs_vfid, q->sp_obj.func_id);
-
-    /* mac/vlan objects are per queue, but only those
-     * that belong to the leading queue are initialized
-     */
-    if (vfq_is_leading(q)) {
-        /* mac */
-        bnx2x_init_mac_obj(bp, &q->mac_obj,
-                           cl_id, q->cid, func_id,
-                           bnx2x_vf_sp(bp, vf, mac_rdata),
-                           bnx2x_vf_sp_map(bp, vf, mac_rdata),
-                           BNX2X_FILTER_MAC_PENDING,
-                           &vf->filter_state,
-                           BNX2X_OBJ_TYPE_RX_TX,
-                           &bp->macs_pool);
-        /* vlan */
-        bnx2x_init_vlan_obj(bp, &q->vlan_obj,
-                            cl_id, q->cid, func_id,
-                            bnx2x_vf_sp(bp, vf, vlan_rdata),
-                            bnx2x_vf_sp_map(bp, vf, vlan_rdata),
-                            BNX2X_FILTER_VLAN_PENDING,
-                            &vf->filter_state,
-                            BNX2X_OBJ_TYPE_RX_TX,
-                            &bp->vlans_pool);
-
-        /* mcast */
-        bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
-                             q->cid, func_id, func_id,
-                             bnx2x_vf_sp(bp, vf, mcast_rdata),
-                             bnx2x_vf_sp_map(bp, vf, mcast_rdata),
-                             BNX2X_FILTER_MCAST_PENDING,
-                             &vf->filter_state,
-                             BNX2X_OBJ_TYPE_RX_TX);
-
-        vf->leading_rss = cl_id;
-    }
+       "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+       vf->abs_vfid, q->sp_obj.func_id, q->cid);
 }
 /* called by bnx2x_nic_load */
 int bnx2x_iov_nic_init(struct bnx2x *bp)
 {
-    int vfid, qcount, i;
+    int vfid;
 
     if (!IS_SRIOV(bp)) {
         DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");

@@ -2196,7 +2195,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
                        BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
 
         /* init statically provisioned resources */
-        bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+        bnx2x_iov_static_resc(bp, vf);
 
         /* queues are initialized during VF-ACQUIRE */

@@ -2232,13 +2231,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
     }
 
     /* Final VF init */
-    qcount = 0;
-    for_each_vf(bp, i) {
-        struct bnx2x_virtf *vf = BP_VF(bp, i);
+    for_each_vf(bp, vfid) {
+        struct bnx2x_virtf *vf = BP_VF(bp, vfid);
 
         /* fill in the BDF and bars */
-        vf->bus = bnx2x_vf_bus(bp, i);
-        vf->devfn = bnx2x_vf_devfn(bp, i);
+        vf->bus = bnx2x_vf_bus(bp, vfid);
+        vf->devfn = bnx2x_vf_devfn(bp, vfid);
         bnx2x_vf_set_bars(bp, vf);
 
         DP(BNX2X_MSG_IOV,

@@ -2247,10 +2245,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
            (unsigned)vf->bars[0].bar, vf->bars[0].size,
            (unsigned)vf->bars[1].bar, vf->bars[1].size,
            (unsigned)vf->bars[2].bar, vf->bars[2].size);
-
-        /* set local queue arrays */
-        vf->vfqs = &bp->vfdb->vfqs[qcount];
-        qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
     }
 
     return 0;
@@ -2556,6 +2550,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
         for_each_vfq(vf, j) {
             struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
 
+            dma_addr_t q_stats_addr =
+                vf->fw_stat_map + j * vf->stats_stride;
+
             /* collect stats fro active queues only */
             if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
                 BNX2X_Q_LOGICAL_STATE_STOPPED)

@@ -2563,13 +2560,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 
             /* create stats query entry for this queue */
             cur_query_entry->kind = STATS_TYPE_QUEUE;
-            cur_query_entry->index = vfq_cl_id(vf, rxq);
+            cur_query_entry->index = vfq_stat_id(vf, rxq);
             cur_query_entry->funcID =
                 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
             cur_query_entry->address.hi =
-                cpu_to_le32(U64_HI(vf->fw_stat_map));
+                cpu_to_le32(U64_HI(q_stats_addr));
             cur_query_entry->address.lo =
-                cpu_to_le32(U64_LO(vf->fw_stat_map));
+                cpu_to_le32(U64_LO(q_stats_addr));
             DP(BNX2X_MSG_IOV,
                "added address %x %x for vf %d queue %d client %d\n",
                cur_query_entry->address.hi,

@@ -2578,6 +2575,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
             cur_query_entry++;
             cur_data_offset += sizeof(struct per_queue_stats);
             stats_count++;
+
+            /* all stats are coalesced to the leading queue */
+            if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+                break;
         }
     }
     bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
@@ -2596,6 +2597,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
     for_each_vf(bp, i) {
         struct bnx2x_virtf *vf = BP_VF(bp, i);
 
+        if (!vf) {
+            BNX2X_ERR("VF was null! skipping...\n");
+            continue;
+        }
+
         if (!list_empty(&vf->op_list_head) &&
             atomic_read(&vf->op_in_progress)) {
             DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
@@ -2743,7 +2749,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
         struct bnx2x_vf_queue *q = vfq_get(vf, i);
 
         if (!q) {
-            DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+            BNX2X_ERR("q number %d was not allocated\n", i);
             return -EINVAL;
         }
@@ -2947,6 +2953,43 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
     bnx2x_vfop_end(bp, vf, vfop);
 }
 
+static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+    struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+    enum bnx2x_vfop_rss_state state;
+
+    if (!vfop) {
+        BNX2X_ERR("vfop was null\n");
+        return;
+    }
+
+    state = vfop->state;
+    bnx2x_vfop_reset_wq(vf);
+
+    if (vfop->rc < 0)
+        goto op_err;
+
+    DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+    switch (state) {
+    case BNX2X_VFOP_RSS_CONFIG:
+        /* next state */
+        vfop->state = BNX2X_VFOP_RSS_DONE;
+        bnx2x_config_rss(bp, &vfop->op_p->rss);
+        bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+        BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
+op_done:
+    case BNX2X_VFOP_RSS_DONE:
+        bnx2x_vfop_end(bp, vf, vfop);
+        return;
+    default:
+        bnx2x_vfop_default(state);
+    }
+op_pending:
+    return;
+}
+
 int bnx2x_vfop_release_cmd(struct bnx2x *bp,
                            struct bnx2x_virtf *vf,
                            struct bnx2x_vfop_cmd *cmd)

@@ -2961,6 +3004,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
     return -ENOMEM;
 }
 
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+                       struct bnx2x_virtf *vf,
+                       struct bnx2x_vfop_cmd *cmd)
+{
+    struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+    if (vfop) {
+        bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
+                         cmd->done);
+        return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
+                                     cmd->block);
+    }
+    return -ENOMEM;
+}
+
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
@@ -2972,6 +3030,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
         .block = block,
     };
     int rc;
+
+    DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
     bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
 
     rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
@@ -3000,6 +3060,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
                               enum channel_tlvs tlv)
 {
+    /* we don't lock the channel for unsupported tlvs */
+    if (!bnx2x_tlv_supported(tlv)) {
+        BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+        return;
+    }
+
     /* lock the channel */
     mutex_lock(&vf->op_mutex);
@@ -3014,19 +3080,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 enum channel_tlvs expected_tlv)
 {
+    enum channel_tlvs current_tlv;
+
+    if (!vf) {
+        BNX2X_ERR("VF was %p\n", vf);
+        return;
+    }
+
+    current_tlv = vf->op_current;
+
+    /* we don't unlock the channel for unsupported tlvs */
+    if (!bnx2x_tlv_supported(expected_tlv))
+        return;
+
     WARN(expected_tlv != vf->op_current,
          "lock mismatch: expected %d found %d", expected_tlv,
          vf->op_current);
 
+    /* record the locking op */
+    vf->op_current = CHANNEL_TLV_NONE;
+
     /* lock the channel */
     mutex_unlock(&vf->op_mutex);
 
     /* log the unlock */
     DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
        vf->abs_vfid, vf->op_current);
-
-    /* record the locking op */
-    vf->op_current = CHANNEL_TLV_NONE;
 }
 
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
@@ -3057,11 +3136,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
         return bnx2x_enable_sriov(bp);
     }
 }
 
+#define IGU_ENTRY_SIZE 4
+
 int bnx2x_enable_sriov(struct bnx2x *bp)
 {
     int rc = 0, req_vfs = bp->requested_nr_virtfn;
+    int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+    u32 igu_entry, address;
+    u16 num_vf_queues;
+
+    if (req_vfs == 0)
+        return 0;
+
+    first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+    /* statically distribute vf sb pool between VFs */
+    num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+                          BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+    /* zero previous values learned from igu cam */
+    for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+        struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+        vf->sb_count = 0;
+        vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+    }
+    bp->vfdb->vf_sbs_pool = 0;
+
+    /* prepare IGU cam */
+    sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+    address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+    for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+        for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+            igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+                vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+                IGU_REG_MAPPING_MEMORY_VALID;
+            DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+               sb_idx, vf_idx);
+            REG_WR(bp, address, igu_entry);
+            sb_idx++;
+            address += IGU_ENTRY_SIZE;
+        }
+    }
+
+    /* Reinitialize vf database according to igu cam */
+    bnx2x_get_vf_igu_cam_info(bp);
+
+    DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+       BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+    qcount = 0;
+    for_each_vf(bp, vf_idx) {
+        struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+        /* set local queue arrays */
+        vf->vfqs = &bp->vfdb->vfqs[qcount];
+        qcount += vf_sb_count(vf);
+    }
+
+    /* prepare msix vectors in VF configuration space */
+    for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+        REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+               num_vf_queues);
+    }
+    bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+    /* enable sriov. This will probe all the VFs, and consequentially cause
+     * the "acquire" messages to appear on the VF PF channel.
+     */
+    DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+    pci_disable_sriov(bp->pdev);
     rc = pci_enable_sriov(bp->pdev, req_vfs);
     if (rc) {
         BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3089,8 +3234,7 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
     pci_disable_sriov(bp->pdev);
 }
 
-static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
-                             struct bnx2x_virtf **vf,
-                             struct pf_vf_bulletin_content **bulletin)
+int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
+                      struct pf_vf_bulletin_content **bulletin)
 {
     if (bp->state != BNX2X_STATE_OPEN) {

@@ -3114,7 +3258,13 @@ int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
     *bulletin = BP_VF_BULLETIN(bp, vfidx);
 
     if (!*vf) {
-        BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+        BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+                  vfidx);
+        return -EINVAL;
+    }
+
+    if (!(*vf)->vfqs) {
+        BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
                   vfidx);
         return -EINVAL;
     }
@@ -3142,8 +3292,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
     rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
     if (rc)
         return rc;
-    mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
-    vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+    mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+    vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
     if (!mac_obj || !vlan_obj) {
         BNX2X_ERR("VF partially initialized\n");
         return -EINVAL;
@@ -3155,10 +3305,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
     ivi->spoofchk = 1; /*always enabled */
     if (vf->state == VF_ENABLED) {
         /* mac and vlan are in vlan_mac objects */
-        mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
-                                0, ETH_ALEN);
-        vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
-                                 0, VLAN_HLEN);
+        if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+            mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
                                     0, ETH_ALEN);
+        if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
+            vlan_obj->get_n_elements(bp, vlan_obj, 1,
+                                     (u8 *)&ivi->vlan, 0,
+                                     VLAN_HLEN);
     } else {
         /* mac */
         if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3226,14 +3379,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
         return rc;
     }
 
+    /* is vf initialized and queue set up? */
     q_logical_state =
-        bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+        bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
     if (vf->state == VF_ENABLED &&
         q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
         /* configure the mac in device on this vf's queue */
         unsigned long ramrod_flags = 0;
-        struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+        struct bnx2x_vlan_mac_obj *mac_obj =
+            &bnx2x_leading_vfq(vf, mac_obj);
+
+        rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+        if (rc)
+            return rc;
 
         /* must lock vfpf channel to protect against vf flows */
         bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
@@ -3293,18 +3450,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
     /* is vf initialized and queue set up? */
     q_logical_state =
-        bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+        bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
     if (vf->state == VF_ENABLED &&
         q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
         /* configure the vlan in device on this vf's queue */
         unsigned long ramrod_flags = 0;
         unsigned long vlan_mac_flags = 0;
         struct bnx2x_vlan_mac_obj *vlan_obj =
-            &bnx2x_vfq(vf, 0, vlan_obj);
+            &bnx2x_leading_vfq(vf, vlan_obj);
         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
         struct bnx2x_queue_state_params q_params = {NULL};
         struct bnx2x_queue_update_params *update_params;
 
+        rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+        if (rc)
+            return rc;
+
         memset(&ramrod_param, 0, sizeof(ramrod_param));
 
         /* must lock vfpf channel to protect against vf flows */

@@ -3324,7 +3484,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
          */
         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
         q_params.cmd = BNX2X_Q_CMD_UPDATE;
-        q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+        q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
 
         update_params = &q_params.params.update;
         __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
                   &update_params->update_flags);
......
@@ -81,6 +81,7 @@ struct bnx2x_vf_queue {
     u32 cid;
     u16 index;
     u16 sb_idx;
+    bool is_leading;
 };
 
 /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:

@@ -194,6 +195,7 @@ struct bnx2x_virtf {
 #define VF_CFG_INT_SIMD 0x0008
 #define VF_CACHE_LINE 0x0010
 #define VF_CFG_VLAN 0x0020
+#define VF_CFG_STATS_COALESCE 0x0040
 
     u8 state;
 #define VF_FREE 0 /* VF ready to be acquired holds no resc */

@@ -213,6 +215,7 @@ struct bnx2x_virtf {
 
     /* dma */
     dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+    u16 stats_stride;
     dma_addr_t spq_map;
     dma_addr_t bulletin_map;

@@ -239,7 +242,10 @@ struct bnx2x_virtf {
     u8 igu_base_id; /* base igu status block id */
 
     struct bnx2x_vf_queue *vfqs;
+#define LEADING_IDX 0
+#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
 #define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
 
     u8 index; /* index in the vf array */
     u8 abs_vfid;

@@ -358,6 +364,10 @@ struct bnx2x_vf_sp {
         struct client_init_ramrod_data init_data;
         struct client_update_ramrod_data update_data;
     } q_data;
+
+    union {
+        struct eth_rss_update_ramrod_data e2;
+    } rss_rdata;
 };
 
 struct hw_dma {

@@ -403,6 +413,10 @@ struct bnx2x_vfdb {
 #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
     u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+    /* the number of msix vectors belonging to this PF designated for VFs */
+    u16 vf_sbs_pool;
+    u16 first_vf_igu_entry;
 };
 
 /* queue access */
@@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
     return &(vf->vfqs[index]);
 }
 
-static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
-{
-    return (vfq->index == 0);
-}
-
 /* FW ids */
 static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
 {

@@ -434,6 +443,9 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 {
+    if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+        return vf->leading_rss;
+    else
         return vfq_cl_id(vf, q);
 }

@@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
                            struct bnx2x_virtf *vf,
                            struct bnx2x_vfop_cmd *cmd);
 
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+                       struct bnx2x_virtf *vf,
+                       struct bnx2x_vfop_cmd *cmd);
+
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an

@@ -758,7 +774,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp);
 void bnx2x_disable_sriov(struct bnx2x *bp);
 static inline int bnx2x_vf_headroom(struct bnx2x *bp)
 {
-    return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+    return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
 }
 
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
......
@@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 
         /* humble our request */
         req->resc_request.num_txqs =
-            bp->acquire_resp.resc.num_txqs;
+            min(req->resc_request.num_txqs,
+                bp->acquire_resp.resc.num_txqs);
         req->resc_request.num_rxqs =
-            bp->acquire_resp.resc.num_rxqs;
+            min(req->resc_request.num_rxqs,
+                bp->acquire_resp.resc.num_rxqs);
         req->resc_request.num_sbs =
-            bp->acquire_resp.resc.num_sbs;
+            min(req->resc_request.num_sbs,
+                bp->acquire_resp.resc.num_sbs);
         req->resc_request.num_mac_filters =
-            bp->acquire_resp.resc.num_mac_filters;
+            min(req->resc_request.num_mac_filters,
+                bp->acquire_resp.resc.num_mac_filters);
         req->resc_request.num_vlan_filters =
-            bp->acquire_resp.resc.num_vlan_filters;
+            min(req->resc_request.num_vlan_filters,
+                bp->acquire_resp.resc.num_vlan_filters);
         req->resc_request.num_mc_filters =
-            bp->acquire_resp.resc.num_mc_filters;
+            min(req->resc_request.num_mc_filters,
+                bp->acquire_resp.resc.num_mc_filters);
 
         /* Clear response buffer */
         memset(&bp->vf2pf_mbox->resp, 0,

@@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
     bp->common.flash_size = 0;
     bp->flags |=
         NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
-    bp->igu_sb_cnt = 1;
+    bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
     bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
     strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
             sizeof(bp->fw_ver));
@@ -452,6 +458,53 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
     bnx2x_free_irq(bp);
 }
 
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                   struct bnx2x_vf_queue *q)
+{
+    u8 cl_id = vfq_cl_id(vf, q);
+    u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+    /* mac */
+    bnx2x_init_mac_obj(bp, &q->mac_obj,
+                       cl_id, q->cid, func_id,
+                       bnx2x_vf_sp(bp, vf, mac_rdata),
+                       bnx2x_vf_sp_map(bp, vf, mac_rdata),
+                       BNX2X_FILTER_MAC_PENDING,
+                       &vf->filter_state,
+                       BNX2X_OBJ_TYPE_RX_TX,
+                       &bp->macs_pool);
+    /* vlan */
+    bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+                        cl_id, q->cid, func_id,
+                        bnx2x_vf_sp(bp, vf, vlan_rdata),
+                        bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+                        BNX2X_FILTER_VLAN_PENDING,
+                        &vf->filter_state,
+                        BNX2X_OBJ_TYPE_RX_TX,
+                        &bp->vlans_pool);
+
+    /* mcast */
+    bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+                         q->cid, func_id, func_id,
+                         bnx2x_vf_sp(bp, vf, mcast_rdata),
+                         bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+                         BNX2X_FILTER_MCAST_PENDING,
+                         &vf->filter_state,
+                         BNX2X_OBJ_TYPE_RX_TX);
+
+    /* rss */
+    bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+                              func_id, func_id,
+                              bnx2x_vf_sp(bp, vf, rss_rdata),
+                              bnx2x_vf_sp_map(bp, vf, rss_rdata),
+                              BNX2X_FILTER_RSS_CONF_PENDING,
+                              &vf->filter_state,
+                              BNX2X_OBJ_TYPE_RX_TX);
+
+    vf->leading_rss = cl_id;
+    q->is_leading = true;
+}
+
 /* ask the pf to open a queue for the vf */
 int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
 {
@@ -948,7 +1001,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
     /* fill in pfdev info */
     resp->pfdev_info.chip_num = bp->common.chip_id;
-    resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+    resp->pfdev_info.db_size = bp->db_size;
     resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
     resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
                                /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
@@ -1054,8 +1107,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
     /* record ghost addresses from vf message */
     vf->spq_map = init->spq_addr;
     vf->fw_stat_map = init->stats_addr;
+    vf->stats_stride = init->stats_stride;
     vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
 
+    /* set VF multiqueue statistics collection mode */
+    if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+        vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
     /* response */
     bnx2x_vf_mbx_resp(bp, vf);
 }
@@ -1080,6 +1138,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
         __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
     if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
         __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+    if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+        __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
 
     /* outer vlan removal is set according to PF's multi function mode */
     if (IS_MF_SD(bp))
@@ -1113,6 +1173,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
         struct bnx2x_queue_init_params *init_p;
         struct bnx2x_queue_setup_params *setup_p;
 
+        if (bnx2x_vfq_is_leading(q))
+            bnx2x_leading_vfq_init(bp, vf, q);
+
         /* re-init the VF operation context */
         memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
         setup_p = &vf->op_params.qctor.prep_qsetup;
@@ -1552,6 +1615,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
     bnx2x_vf_mbx_resp(bp, vf);
 }
 
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                    struct bnx2x_vf_mbx *mbx)
+{
+    struct bnx2x_vfop_cmd cmd = {
+        .done = bnx2x_vf_mbx_resp,
+        .block = false,
+    };
+    struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+    struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+
+    if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+        rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+        BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+                  vf->index);
+        vf->op_rc = -EINVAL;
+        goto mbx_resp;
+    }
+
+    /* set vfop params according to rss tlv */
+    memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+           T_ETH_INDIRECTION_TABLE_SIZE);
+    memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
+           sizeof(rss_tlv->rss_key));
+    vf_op_params->rss_obj = &vf->rss_conf_obj;
+    vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+
+    /* flags handled individually for backward/forward compatability */
+    if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+        __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+    if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+        __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+    if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+        __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+    if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+        __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+    if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+        __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+    if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+        __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+    if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+        __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+    if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+        __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+    if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+        __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+
+    if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+         rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+        (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+         rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+        BNX2X_ERR("about to hit a FW assert. aborting...\n");
+        vf->op_rc = -EINVAL;
+        goto mbx_resp;
+    }
+
+    vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+
+mbx_resp:
+    if (vf->op_rc)
+        bnx2x_vf_mbx_resp(bp, vf);
+}
+
 /* dispatch request */
 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                  struct bnx2x_vf_mbx *mbx)
@@ -1588,6 +1713,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
         case CHANNEL_TLV_RELEASE:
             bnx2x_vf_mbx_release_vf(bp, vf, mbx);
             break;
+        case CHANNEL_TLV_UPDATE_RSS:
+            bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+            break;
         }
 
     } else {

@@ -1607,7 +1735,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
         /* test whether we can respond to the VF (do we have an address
          * for it?)
          */
-        if (vf->state == VF_ACQUIRED) {
+        if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
             /* mbx_resp uses the op_rc of the VF */
             vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
......
@@ -51,6 +51,7 @@ struct hw_sb_info {
 #define VFPF_QUEUE_FLG_COS 0x0080
 #define VFPF_QUEUE_FLG_HC 0x0100
 #define VFPF_QUEUE_FLG_DHC 0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
 
 #define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
 #define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)

@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
     u8 padding[3];
 };
 
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+    struct vfpf_first_tlv first_tlv;
+    u32 rss_flags;
+#define VFPF_RSS_MODE_DISABLED (1 << 0)
+#define VFPF_RSS_MODE_REGULAR (1 << 1)
+#define VFPF_RSS_SET_SRCH (1 << 2)
+#define VFPF_RSS_IPV4 (1 << 3)
+#define VFPF_RSS_IPV4_TCP (1 << 4)
+#define VFPF_RSS_IPV4_UDP (1 << 5)
+#define VFPF_RSS_IPV6 (1 << 6)
+#define VFPF_RSS_IPV6_TCP (1 << 7)
+#define VFPF_RSS_IPV6_UDP (1 << 8)
+    u8 rss_result_mask;
+    u8 ind_table_size;
+    u8 rss_key_size;
+    u8 padding;
+    u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+    u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
+
 /* acquire response tlv - carries the allocated resources */
 struct pfvf_acquire_resp_tlv {
     struct pfvf_tlv hdr;

@@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv {
     } resc;
 };
 
+#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues
+                                               * stats will be coalesced on
+                                               * the leading RSS queue
+                                               */
+
 /* Init VF */
 struct vfpf_init_tlv {
     struct vfpf_first_tlv first_tlv;
     aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
     aligned_u64 spq_addr;
     aligned_u64 stats_addr;
+    u16 stats_stride;
+    u32 flags;
+    u32 padding[2];
 };
 
 /* Setup Queue */

@@ -294,6 +324,7 @@ union vfpf_tlvs {
     struct vfpf_setup_q_tlv setup_q;
     struct vfpf_set_q_filters_tlv set_q_filters;
     struct vfpf_release_tlv release;
+    struct vfpf_rss_tlv update_rss;
     struct channel_list_end_tlv list_end;
     struct tlv_buffer_size tlv_buf_size;
 };

@@ -355,14 +386,18 @@ enum channel_tlvs {
     CHANNEL_TLV_INIT,
     CHANNEL_TLV_SETUP_Q,
     CHANNEL_TLV_SET_Q_FILTERS,
+    CHANNEL_TLV_ACTIVATE_Q,
+    CHANNEL_TLV_DEACTIVATE_Q,
     CHANNEL_TLV_TEARDOWN_Q,
     CHANNEL_TLV_CLOSE,
     CHANNEL_TLV_RELEASE,
+    CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
     CHANNEL_TLV_PF_RELEASE_VF,
     CHANNEL_TLV_LIST_END,
     CHANNEL_TLV_FLR,
     CHANNEL_TLV_PF_SET_MAC,
     CHANNEL_TLV_PF_SET_VLAN,
+    CHANNEL_TLV_UPDATE_RSS,
     CHANNEL_TLV_MAX
 };
......
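For reference, a sketch of how a VF driver might populate the new vfpf_rss_tlv before sending CHANNEL_TLV_UPDATE_RSS. The trimmed struct below is an illustrative stand-in, and the T_ETH_INDIRECTION_TABLE_SIZE / T_ETH_RSS_KEY values are assumptions (the real definitions live in the firmware headers); the field names and the size check mirror the PF-side handler bnx2x_vf_mbx_update_rss added above:

#include <stdint.h>
#include <string.h>

/* Assumed firmware constants; the PF rejects the request unless the
 * advertised sizes match these exactly (see bnx2x_vf_mbx_update_rss).
 */
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY                10   /* 10 u32s = 40-byte hash key */

#define VFPF_RSS_MODE_REGULAR (1 << 1)    /* flag values from this patch */
#define VFPF_RSS_IPV4         (1 << 3)
#define VFPF_RSS_IPV4_TCP     (1 << 4)

/* Trimmed stand-in for struct vfpf_rss_tlv (first_tlv header omitted) */
struct rss_tlv_sketch {
    uint32_t rss_flags;
    uint8_t  rss_result_mask;
    uint8_t  ind_table_size;
    uint8_t  rss_key_size;
    uint8_t  padding;
    uint8_t  ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
    uint32_t rss_key[T_ETH_RSS_KEY];
};

static void fill_rss_tlv(struct rss_tlv_sketch *tlv, int num_queues)
{
    int i;

    memset(tlv, 0, sizeof(*tlv));
    /* enable RSS for plain IPv4 and IPv4/TCP traffic; note the PF refuses
     * UDP hashing without the matching TCP flag, so we stay clear of that
     */
    tlv->rss_flags = VFPF_RSS_MODE_REGULAR | VFPF_RSS_IPV4 |
                     VFPF_RSS_IPV4_TCP;
    tlv->rss_result_mask = num_queues - 1;  /* num_queues must be 2^n */
    tlv->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
    tlv->rss_key_size = T_ETH_RSS_KEY;

    /* spread the indirection table evenly across the VF's queues */
    for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
        tlv->ind_table[i] = i % num_queues;
    /* a real driver would fill rss_key with random bytes here */
}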