Commit 14a94ebd authored by Michal Kalderon, committed by David S. Miller

bnx2x: Add support in PF driver for RSC

This provides PF-side support for VFs assigned to a VM running windows
2012 with the RSC feature enabled.
Signed-off-by: Michal Kalderon <michals@broadcom.com>
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ba72f32c
...@@ -1270,6 +1270,7 @@ struct bnx2x_slowpath { ...@@ -1270,6 +1270,7 @@ struct bnx2x_slowpath {
union { union {
struct client_init_ramrod_data init_data; struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data; struct client_update_ramrod_data update_data;
struct tpa_update_ramrod_data tpa_data;
} q_rdata; } q_rdata;
union { union {
......
...@@ -1814,6 +1814,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) ...@@ -1814,6 +1814,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
drv_cmd = BNX2X_Q_CMD_EMPTY; drv_cmd = BNX2X_Q_CMD_EMPTY;
break; break;
case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
break;
default: default:
BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n", BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
command, fp->index); command, fp->index);
...@@ -3644,10 +3649,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, ...@@ -3644,10 +3649,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid)); HW_CID(bp, cid));
type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; /* In some cases, type may already contain the func-id
* mainly in SRIOV related use cases, so we add it here only
type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & * if it's not already set.
SPE_HDR_FUNCTION_ID); */
if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
SPE_HDR_CONN_TYPE;
type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID);
} else {
type = cmd_type;
}
spe->hdr.type = cpu_to_le16(type); spe->hdr.type = cpu_to_le16(type);
......
...@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params { ...@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
u8 cid_index; u8 cid_index;
}; };
/* Parameters for a per-queue TPA (aggregation) update ramrod.
 * Filled on the PF side (from the VF's CHANNEL_TLV_UPDATE_TPA request)
 * and consumed when building the tpa_update ramrod data.
 * NOTE(review): field meanings below mirror the like-named fields of
 * struct vf_pf_tpa_client_info; confirm units/semantics against the
 * FW interface definitions.
 */
struct bnx2x_queue_update_tpa_params {
dma_addr_t sge_map; /* DMA address of this queue's SGE ring */
u8 update_ipv4; /* enable/disable TPA for IPv4 flows */
u8 update_ipv6; /* enable/disable TPA for IPv6 flows */
u8 max_tpa_queues; /* number of TPA aggregation queues */
u8 max_sges_pkt; /* max SGEs per aggregated packet */
u8 complete_on_both_clients;
u8 dont_verify_thr;
u8 tpa_mode;
u8 _pad; /* explicit padding to keep u16 fields aligned */
u16 sge_buff_sz; /* size of a single SGE buffer */
u16 max_agg_sz; /* max total size of an aggregation */
u16 sge_pause_thr_low; /* SGE pause thresholds (low/high water marks) */
u16 sge_pause_thr_high;
};
struct rxq_pause_params { struct rxq_pause_params {
u16 bd_th_lo; u16 bd_th_lo;
u16 bd_th_hi; u16 bd_th_hi;
...@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params { ...@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
/* Params according to the current command */ /* Params according to the current command */
union { union {
struct bnx2x_queue_update_params update; struct bnx2x_queue_update_params update;
struct bnx2x_queue_update_tpa_params update_tpa;
struct bnx2x_queue_setup_params setup; struct bnx2x_queue_setup_params setup;
struct bnx2x_queue_init_params init; struct bnx2x_queue_init_params init;
struct bnx2x_queue_setup_tx_only_params tx_only; struct bnx2x_queue_setup_tx_only_params tx_only;
......
...@@ -176,6 +176,11 @@ enum bnx2x_vfop_rss_state { ...@@ -176,6 +176,11 @@ enum bnx2x_vfop_rss_state {
BNX2X_VFOP_RSS_DONE BNX2X_VFOP_RSS_DONE
}; };
/* States of the TPA-update VF operation state machine
 * (driven by bnx2x_vfop_tpa()).
 */
enum bnx2x_vfop_tpa_state {
BNX2X_VFOP_TPA_CONFIG, /* iterate rx queues, issue one update per queue */
BNX2X_VFOP_TPA_DONE /* all queues updated - finalize the vfop */
};
#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
...@@ -3047,6 +3052,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp, ...@@ -3047,6 +3052,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
return -ENOMEM; return -ENOMEM;
} }
/* VFOP tpa update, send update on all queues */
/* State-machine step for the TPA-update vfop: while in TPA_CONFIG it
 * issues one BNX2X_Q_CMD_UPDATE_TPA queue-state transition per VF rx
 * queue (tpa_args->qid advances each pass); once all queues are done it
 * moves to TPA_DONE and ends the vfop.
 *
 * NOTE(review): the apparent fall-throughs below (CONFIG case into
 * op_err:, op_done: into the DONE case) are only reached via the
 * bnx2x_vfop_finalize() macro, which is expected to branch to the
 * op_err/op_done/op_pending labels based on rc - confirm against the
 * macro definition in bnx2x_sriov.h.
 */
static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
enum bnx2x_vfop_tpa_state state = vfop->state;
/* clear the vf's op_in_progress flag (see bnx2x_vfop_reset_wq) */
bnx2x_vfop_reset_wq(vf);
if (vfop->rc < 0)
goto op_err;
DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
vf->abs_vfid, tpa_args->qid,
state);
switch (state) {
case BNX2X_VFOP_TPA_CONFIG:
/* more rx queues left to update? */
if (tpa_args->qid < vf_rxq_count(vf)) {
struct bnx2x_queue_state_params *qstate =
&vf->op_params.qstate;
qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
/* The only thing that changes for the ramrod params
* between calls is the sge_map
*/
qstate->params.update_tpa.sge_map =
tpa_args->sge_map[tpa_args->qid];
DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
tpa_args->qid,
U64_HI(qstate->params.update_tpa.sge_map),
U64_LO(qstate->params.update_tpa.sge_map));
qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
vfop->rc = bnx2x_queue_state_change(bp, qstate);
/* advance to the next queue; re-enter this state via CONT */
tpa_args->qid++;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
}
/* all queues processed - complete the operation */
vfop->state = BNX2X_VFOP_TPA_DONE;
vfop->rc = 0;
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
op_done:
case BNX2X_VFOP_TPA_DONE:
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
bnx2x_vfop_default(state);
}
op_pending:
return;
}
/* Queue a TPA-update operation for @vf.
 *
 * Copies the per-queue SGE ring addresses out of the VF's TPA TLV into
 * the vfop arguments, resets the queue-loop index and kicks off the
 * bnx2x_vfop_tpa() state machine at BNX2X_VFOP_TPA_CONFIG.
 *
 * Returns the transition result, or -ENOMEM if no vfop slot is free.
 */
int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd,
		       struct vfpf_tpa_tlv *tpa_tlv)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (!vfop)
		return -ENOMEM;

	/* start the per-queue loop from the first rx queue */
	vfop->args.qx.qid = 0;
	memcpy(&vfop->args.tpa.sge_map,
	       tpa_tlv->tpa_client_info.sge_addr,
	       sizeof(vfop->args.tpa.sge_map));
	bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG, bnx2x_vfop_tpa, cmd->done);

	return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa, cmd->block);
}
/* VF release ~ VF close + VF release-resources /* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an * Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered. * irrecoverable error is encountered.
......
...@@ -100,6 +100,7 @@ union bnx2x_vfop_params { ...@@ -100,6 +100,7 @@ union bnx2x_vfop_params {
struct bnx2x_mcast_ramrod_params mcast; struct bnx2x_mcast_ramrod_params mcast;
struct bnx2x_config_rss_params rss; struct bnx2x_config_rss_params rss;
struct bnx2x_vfop_qctor_params qctor; struct bnx2x_vfop_qctor_params qctor;
struct bnx2x_queue_state_params qstate;
}; };
/* forward */ /* forward */
...@@ -166,6 +167,11 @@ struct bnx2x_vfop_args_filters { ...@@ -166,6 +167,11 @@ struct bnx2x_vfop_args_filters {
atomic_t *credit; /* non NULL means 'don't consume credit' */ atomic_t *credit; /* non NULL means 'don't consume credit' */
}; };
/* Per-vfop state for the TPA-update loop: the rx queue currently being
 * updated and the per-queue SGE ring DMA addresses supplied by the VF.
 */
struct bnx2x_vfop_args_tpa {
int qid; /* index of the next rx queue to update */
dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF]; /* SGE ring address per queue */
};
union bnx2x_vfop_args { union bnx2x_vfop_args {
struct bnx2x_vfop_args_mcast mc_list; struct bnx2x_vfop_args_mcast mc_list;
struct bnx2x_vfop_args_qctor qctor; struct bnx2x_vfop_args_qctor qctor;
...@@ -173,6 +179,7 @@ union bnx2x_vfop_args { ...@@ -173,6 +179,7 @@ union bnx2x_vfop_args {
struct bnx2x_vfop_args_defvlan defvlan; struct bnx2x_vfop_args_defvlan defvlan;
struct bnx2x_vfop_args_qx qx; struct bnx2x_vfop_args_qx qx;
struct bnx2x_vfop_args_filters filters; struct bnx2x_vfop_args_filters filters;
struct bnx2x_vfop_args_tpa tpa;
}; };
struct bnx2x_vfop { struct bnx2x_vfop {
...@@ -704,6 +711,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp, ...@@ -704,6 +711,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf, struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd); struct bnx2x_vfop_cmd *cmd);
/* Enqueue a TPA-update vfop for @vf; per-queue parameters arrive in
 * @tpa_tlv. Returns 0 or the transition result on success, -ENOMEM if
 * no vfop could be allocated.
 */
int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
struct vfpf_tpa_tlv *tpa_tlv);
/* VF release ~ VF close + VF release-resources /* VF release ~ VF close + VF release-resources
* *
* Release is the ultimate SW shutdown and is called whenever an * Release is the ultimate SW shutdown and is called whenever an
......
...@@ -1159,7 +1159,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -1159,7 +1159,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.db_size = bp->db_size; resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
/* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); PFVF_CAP_TPA |
PFVF_CAP_TPA_UPDATE);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver)); sizeof(resp->pfdev_info.fw_ver));
...@@ -1910,6 +1911,75 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -1910,6 +1911,75 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_mbx_resp(bp, vf); bnx2x_vf_mbx_resp(bp, vf);
} }
static int bnx2x_validate_tpa_params(struct bnx2x *bp,
struct vfpf_tpa_tlv *tpa_tlv)
{
int rc = 0;
if (tpa_tlv->tpa_client_info.max_sges_for_packet >
U_ETH_MAX_SGES_FOR_PACKET) {
rc = -EINVAL;
BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
tpa_tlv->tpa_client_info.max_sges_for_packet,
U_ETH_MAX_SGES_FOR_PACKET);
}
if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
rc = -EINVAL;
BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
tpa_tlv->tpa_client_info.max_tpa_queues,
MAX_AGG_QS(bp));
}
return rc;
}
static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
{
struct bnx2x_vfop_cmd cmd = {
.done = bnx2x_vf_mbx_resp,
.block = false,
};
struct bnx2x_queue_update_tpa_params *vf_op_params =
&vf->op_params.qstate.params.update_tpa;
struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
memset(vf_op_params, 0, sizeof(*vf_op_params));
if (bnx2x_validate_tpa_params(bp, tpa_tlv))
goto mbx_resp;
vf_op_params->complete_on_both_clients =
tpa_tlv->tpa_client_info.complete_on_both_clients;
vf_op_params->dont_verify_thr =
tpa_tlv->tpa_client_info.dont_verify_thr;
vf_op_params->max_agg_sz =
tpa_tlv->tpa_client_info.max_agg_size;
vf_op_params->max_sges_pkt =
tpa_tlv->tpa_client_info.max_sges_for_packet;
vf_op_params->max_tpa_queues =
tpa_tlv->tpa_client_info.max_tpa_queues;
vf_op_params->sge_buff_sz =
tpa_tlv->tpa_client_info.sge_buff_size;
vf_op_params->sge_pause_thr_high =
tpa_tlv->tpa_client_info.sge_pause_thr_high;
vf_op_params->sge_pause_thr_low =
tpa_tlv->tpa_client_info.sge_pause_thr_low;
vf_op_params->tpa_mode =
tpa_tlv->tpa_client_info.tpa_mode;
vf_op_params->update_ipv4 =
tpa_tlv->tpa_client_info.update_ipv4;
vf_op_params->update_ipv6 =
tpa_tlv->tpa_client_info.update_ipv6;
vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
mbx_resp:
if (vf->op_rc)
bnx2x_vf_mbx_resp(bp, vf);
}
/* dispatch request */ /* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx) struct bnx2x_vf_mbx *mbx)
...@@ -1949,6 +2019,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, ...@@ -1949,6 +2019,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
case CHANNEL_TLV_UPDATE_RSS: case CHANNEL_TLV_UPDATE_RSS:
bnx2x_vf_mbx_update_rss(bp, vf, mbx); bnx2x_vf_mbx_update_rss(bp, vf, mbx);
return; return;
case CHANNEL_TLV_UPDATE_TPA:
bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
return;
} }
} else { } else {
......
...@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv { ...@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
#define PFVF_CAP_RSS 0x00000001 #define PFVF_CAP_RSS 0x00000001
#define PFVF_CAP_DHC 0x00000002 #define PFVF_CAP_DHC 0x00000002
#define PFVF_CAP_TPA 0x00000004 #define PFVF_CAP_TPA 0x00000004
#define PFVF_CAP_TPA_UPDATE 0x00000008
char fw_ver[32]; char fw_ver[32];
u16 db_size; u16 db_size;
u8 indices_per_sb; u8 indices_per_sb;
...@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv { ...@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
u32 rx_mask; /* see mask constants at the top of the file */ u32 rx_mask; /* see mask constants at the top of the file */
}; };
/* VF->PF request: update TPA (aggregation) configuration.
 * Part of the VF/PF channel ABI - field order and sizes must not change.
 * NOTE(review): field semantics mirror the PF-side
 * bnx2x_queue_update_tpa_params; confirm units against the FW interface.
 */
struct vfpf_tpa_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_tpa_client_info {
aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF]; /* SGE ring DMA addr per queue */
u8 update_ipv4; /* enable/disable TPA for IPv4 flows */
u8 update_ipv6; /* enable/disable TPA for IPv6 flows */
u8 max_tpa_queues;
u8 max_sges_for_packet;
u8 complete_on_both_clients;
u8 dont_verify_thr;
u8 tpa_mode;
u16 sge_buff_size;
u16 max_agg_size;
u16 sge_pause_thr_low; /* SGE pause thresholds (low/high water marks) */
u16 sge_pause_thr_high;
} tpa_client_info;
};
/* close VF (disable VF) */ /* close VF (disable VF) */
struct vfpf_close_tlv { struct vfpf_close_tlv {
struct vfpf_first_tlv first_tlv; struct vfpf_first_tlv first_tlv;
...@@ -331,6 +351,7 @@ union vfpf_tlvs { ...@@ -331,6 +351,7 @@ union vfpf_tlvs {
struct vfpf_set_q_filters_tlv set_q_filters; struct vfpf_set_q_filters_tlv set_q_filters;
struct vfpf_release_tlv release; struct vfpf_release_tlv release;
struct vfpf_rss_tlv update_rss; struct vfpf_rss_tlv update_rss;
struct vfpf_tpa_tlv update_tpa;
struct channel_list_end_tlv list_end; struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size; struct tlv_buffer_size tlv_buf_size;
}; };
...@@ -405,6 +426,7 @@ enum channel_tlvs { ...@@ -405,6 +426,7 @@ enum channel_tlvs {
CHANNEL_TLV_PF_SET_VLAN, CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS, CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_PHYS_PORT_ID, CHANNEL_TLV_PHYS_PORT_ID,
CHANNEL_TLV_UPDATE_TPA,
CHANNEL_TLV_MAX CHANNEL_TLV_MAX
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment