Commit b0301a5a authored by David S. Miller

Merge branch 'qed-next'

Yuval Basson says:

====================
qed: Add xrc core support for RoCE

This series adds support for configuring XRC and provides the necessary
APIs for the RDMA upper-layer driver (qedr) to enable the XRC feature.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d8bed686 7bfb399e
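
For orientation before the diff: a minimal sketch of how an upper-layer driver
such as qedr might consume the new XRC-domain hooks exposed through struct
qed_rdma_ops. The helper below and its error handling are illustrative
assumptions, not code from this series.

    /* Hypothetical consumer of the rdma_alloc_xrcd/rdma_dealloc_xrcd ops
     * added by this merge.
     */
    static int example_xrcd_cycle(const struct qed_rdma_ops *ops,
                                  void *rdma_cxt)
    {
        u16 xrcd_id;
        int rc;

        rc = ops->rdma_alloc_xrcd(rdma_cxt, &xrcd_id);
        if (rc)
            return rc; /* xrcd_map exhausted or RDMA not started */

        /* ... create XRC SRQs and XRC QPs that reference xrcd_id ... */

        ops->rdma_dealloc_xrcd(rdma_cxt, xrcd_id);
        return 0;
    }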
@@ -110,6 +110,7 @@ struct src_ent {
 	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

 #define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
+#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))

 #define TYPE0_TASK_CXT_SIZE(p_hwfn) \
 	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
@@ -293,18 +294,40 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
 	return NULL;
 }

-static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
+				  u32 num_srqs, u32 num_xrc_srqs)
 {
 	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

 	p_mgr->srq_count = num_srqs;
+	p_mgr->xrc_srq_count = num_xrc_srqs;
 }

-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
+			      enum ilt_clients ilt_client)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];
+
+	return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+}
+
+static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
+{
+	u32 page_size;
+
+	page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
+	return page_size / XRC_SRQ_CXT_SIZE;
+}
+
+u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+	u32 total_srqs;
+
+	total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;

-	return p_mgr->srq_count;
+	return total_srqs;
 }

 /* set the iids count per protocol */
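
A worked example of the per-page math above; both sizes are assumptions for
illustration, not values taken from the firmware headers.

    /* Assuming a 4 KB TSDM ILT page and a 64 B struct rdma_xrc_srq_context:
     *
     *   qed_cxt_xrc_srqs_per_page() = 4096 / 64 = 64
     *
     * so a single reserved ILT page caps the function at 64 XRC SRQs, and
     * qed_cxt_get_total_srq_count() returns srq_count + 64.
     */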
@@ -692,7 +715,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 	}

 	/* TSDM (SRQ CONTEXT) */
-	total = qed_cxt_get_srq_count(p_hwfn);
+	total = qed_cxt_get_total_srq_count(p_hwfn);

 	if (total) {
 		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);

@@ -1962,11 +1985,9 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
 				   struct qed_rdma_pf_params *p_params,
 				   u32 num_tasks)
 {
-	u32 num_cons, num_qps, num_srqs;
+	u32 num_cons, num_qps;
 	enum protocol_type proto;

-	num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
-
 	if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
 		DP_NOTICE(p_hwfn,
 			  "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");

@@ -1989,6 +2010,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
 	}

 	if (num_cons && num_tasks) {
+		u32 num_srqs, num_xrc_srqs;
+
 		qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);

 		/* Deliberatly passing ROCE for tasks id. This is because

@@ -1997,7 +2020,13 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
 		qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
 					    QED_CXT_ROCE_TID_SEG, 1,
 					    num_tasks, false);
-		qed_cxt_set_srq_count(p_hwfn, num_srqs);
+
+		num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
+
+		/* XRC SRQs populate a single ILT page */
+		num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);
+
+		qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
 	} else {
 		DP_INFO(p_hwfn->cdev,
 			"RDMA personality used without setting params!\n");
@@ -2163,10 +2192,17 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		p_blk = &p_cli->pf_blks[CDUC_BLK];
 		break;
 	case QED_ELEM_SRQ:
+		/* The first ILT page is not used for regular SRQs. Skip it. */
+		iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
 		elem_size = SRQ_CXT_SIZE;
 		p_blk = &p_cli->pf_blks[SRQ_BLK];
 		break;
+	case QED_ELEM_XRC_SRQ:
+		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+		elem_size = XRC_SRQ_CXT_SIZE;
+		p_blk = &p_cli->pf_blks[SRQ_BLK];
+		break;
 	case QED_ELEM_TASK:
 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
 		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);

@@ -2386,8 +2422,12 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
 		return rc;

 	/* Free TSDM CXT */
-	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
-				    qed_cxt_get_srq_count(p_hwfn));
+	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
+				    p_hwfn->p_cxt_mngr->xrc_srq_count);
+
+	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
+				    p_hwfn->p_cxt_mngr->xrc_srq_count,
+				    p_hwfn->p_cxt_mngr->srq_count);

 	return rc;
 }
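
Taken together, the ILT changes above give the TSDM client the following iid
layout (a summary of this diff, with N and M as shorthand):

    /* TSDM (SRQ context) iid space, where N = xrc_srq_count (one ILT page
     * worth) and M = srq_count:
     *
     *   iid 0 .. N - 1        XRC SRQ contexts (first ILT page)
     *   iid N .. N + M - 1    regular SRQ contexts
     *
     * Hence QED_ELEM_SRQ allocations skip the first N iids, and
     * qed_cxt_free_proto_ilt() releases the two ranges separately.
     */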
...
@@ -82,7 +82,8 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
 enum qed_cxt_elem_type {
 	QED_ELEM_CXT,
 	QED_ELEM_SRQ,
-	QED_ELEM_TASK
+	QED_ELEM_TASK,
+	QED_ELEM_XRC_SRQ,
 };

 u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,

@@ -235,7 +236,6 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
 				enum protocol_type type);
 u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
 				enum protocol_type type);
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn);
 int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);

 #define QED_CTX_WORKING_MEM 0

@@ -358,6 +358,7 @@ struct qed_cxt_mngr {
 	/* total number of SRQ's for this hwfn */
 	u32 srq_count;
+	u32 xrc_srq_count;

 	/* Maximal number of L2 steering filters */
 	u32 arfs_count;

@@ -372,4 +373,9 @@ u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
 u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
 u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);

+u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
+			      enum ilt_clients ilt_client);
+
+u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);
+
 #endif
@@ -2269,6 +2269,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
 		/* EQ */
 		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
 		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+			u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
 			enum protocol_type rdma_proto;

 			if (QED_IS_ROCE_PERSONALITY(p_hwfn))

@@ -2279,7 +2280,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
 			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       rdma_proto,
							       NULL) * 2;
-			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+			/* EQ should be able to get events from all SRQ's
+			 * at the same time
+			 */
+			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
 		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
 			num_cons =
 			    qed_cxt_get_proto_cid_count(p_hwfn,
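
Schematically, the EQ sizing above becomes (the sample counts below are
assumptions for illustration only):

    /* n_eqes for an RDMA personality after this change:
     *
     *   n_eqes = spq chain capacity
     *          + num_cons              (2 * per-protocol cid count)
     *          + 2 * MAX_NUM_VFS_BB
     *          + n_srq                 (srq_count + xrc_srq_count)
     *
     * e.g. an assumed 8192 regular SRQs plus 64 XRC SRQs adds 8256 EQ
     * entries, so every SRQ can raise an event at the same time.
     */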
...
@@ -212,13 +212,22 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
 		goto free_rdma_port;
 	}

+	/* Allocate bit map for XRC Domains */
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
+				 QED_RDMA_MAX_XRCDS, "XRCD");
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate xrcd_map,rc = %d\n", rc);
+		goto free_pd_map;
+	}
+
 	/* Allocate DPI bitmap */
 	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
 				 p_hwfn->dpi_count, "DPI");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
-		goto free_pd_map;
+		goto free_xrcd_map;
 	}

 	/* Allocate bitmap for cq's. The maximum number of CQs is bound to

@@ -271,14 +280,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
 		goto free_cid_map;
 	}

+	/* The first SRQ follows the last XRC SRQ. This means that the
+	 * SRQ IDs start from an offset equals to max_xrc_srqs.
+	 */
+	p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
+	rc = qed_rdma_bmap_alloc(p_hwfn,
+				 &p_rdma_info->xrc_srq_map,
+				 p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
+		goto free_real_cid_map;
+	}
+
 	/* Allocate bitmap for srqs */
-	p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn);
+	p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
 	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
 				 p_rdma_info->num_srqs, "SRQ");
 	if (rc) {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 			   "Failed to allocate srq bitmap, rc = %d\n", rc);
-		goto free_real_cid_map;
+		goto free_xrc_srq_map;
 	}

 	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
@@ -292,6 +314,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)

 free_srq_map:
 	kfree(p_rdma_info->srq_map.bitmap);
+free_xrc_srq_map:
+	kfree(p_rdma_info->xrc_srq_map.bitmap);
 free_real_cid_map:
 	kfree(p_rdma_info->real_cid_map.bitmap);
 free_cid_map:

@@ -304,6 +328,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
 	kfree(p_rdma_info->cq_map.bitmap);
 free_dpi_map:
 	kfree(p_rdma_info->dpi_map.bitmap);
+free_xrcd_map:
+	kfree(p_rdma_info->xrcd_map.bitmap);
 free_pd_map:
 	kfree(p_rdma_info->pd_map.bitmap);
 free_rdma_port:

@@ -377,6 +403,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
+	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);

 	kfree(p_rdma_info->port);
 	kfree(p_rdma_info->dev);
@@ -612,7 +639,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
 	p_params_header->num_cnqs = params->desired_cnq;
-
+	p_params_header->first_reg_srq_id =
+	    cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
+	p_params_header->reg_srq_base_addr =
+	    cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
 	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
 		p_params_header->cq_ring_mode = 1;
 	else
@@ -983,6 +1013,41 @@ static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }

+static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
+{
+	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+	u32 returned_id;
+	int rc;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	rc = qed_rdma_bmap_alloc_id(p_hwfn,
+				    &p_hwfn->p_rdma_info->xrcd_map,
+				    &returned_id);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
+		return rc;
+	}
+
+	*xrcd_id = (u16)returned_id;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
+	return rc;
+}
+
+static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
+{
+	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static enum qed_rdma_toggle_bit
 qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
 {
@@ -1306,6 +1371,8 @@ qed_rdma_create_qp(void *rdma_cxt,
 	qp->resp_offloaded = false;
 	qp->e2e_flow_control_en = qp->use_srq ? false : true;
 	qp->stats_queue = in_params->stats_queue;
+	qp->qp_type = in_params->qp_type;
+	qp->xrcd_id = in_params->xrcd_id;

 	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
 		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);

@@ -1418,6 +1485,18 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
 			   qp->cur_state);
 	}

+	switch (qp->qp_type) {
+	case QED_RDMA_QP_TYPE_XRC_INI:
+		qp->has_req = 1;
+		break;
+	case QED_RDMA_QP_TYPE_XRC_TGT:
+		qp->has_resp = 1;
+		break;
+	default:
+		qp->has_req = 1;
+		qp->has_resp = 1;
+	}
+
 	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
 		enum qed_iwarp_qp_state new_state =
 		    qed_roce2iwarp_state(qp->cur_state);
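
The switch above is what enables one-sided XRC QPs; in summary (a reading of
this diff, not new behaviour):

    /* Role derived from qp_type in qed_rdma_modify_qp():
     *
     *   QED_RDMA_QP_TYPE_XRC_INI -> has_req only  (requester half)
     *   QED_RDMA_QP_TYPE_XRC_TGT -> has_resp only (responder half)
     *   any other type           -> both has_req and has_resp
     *
     * The qed_roce_sp_{create,modify,destroy}_* helpers later in this
     * merge then return early for the role a QP does not have.
     */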
@@ -1657,6 +1736,15 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
 	return QED_AFFIN_HWFN(cdev);
 }

+static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
+					      bool is_xrc)
+{
+	if (is_xrc)
+		return &p_hwfn->p_rdma_info->xrc_srq_map;
+
+	return &p_hwfn->p_rdma_info->srq_map;
+}
+
 static int qed_rdma_modify_srq(void *rdma_cxt,
 			       struct qed_rdma_modify_srq_in_params *in_params)
 {

@@ -1686,8 +1774,8 @@ static int qed_rdma_modify_srq(void *rdma_cxt,
 	if (rc)
 		return rc;

-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x",
-		   in_params->srq_id);
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
+		   in_params->srq_id, in_params->is_xrc);

 	return rc;
 }

@@ -1702,6 +1790,7 @@ qed_rdma_destroy_srq(void *rdma_cxt,
 	struct qed_spq_entry *p_ent;
 	struct qed_bmap *bmap;
 	u16 opaque_fid;
+	u16 offset;
 	int rc;

 	opaque_fid = p_hwfn->hw_info.opaque_fid;

@@ -1723,14 +1812,16 @@ qed_rdma_destroy_srq(void *rdma_cxt,
 	if (rc)
 		return rc;

-	bmap = &p_hwfn->p_rdma_info->srq_map;
+	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
+	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;

 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id);
+	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x",
-		   in_params->srq_id);
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
+		   in_params->srq_id, in_params->is_xrc);

 	return rc;
 }
@@ -1748,24 +1839,26 @@ qed_rdma_create_srq(void *rdma_cxt,
 	u16 opaque_fid, srq_id;
 	struct qed_bmap *bmap;
 	u32 returned_id;
+	u16 offset;
 	int rc;

-	bmap = &p_hwfn->p_rdma_info->srq_map;
+	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
 	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 	rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

 	if (rc) {
-		DP_NOTICE(p_hwfn, "failed to allocate srq id\n");
+		DP_NOTICE(p_hwfn,
+			  "failed to allocate xrc/srq id (is_xrc=%u)\n",
+			  in_params->is_xrc);
 		return rc;
 	}

-	elem_type = QED_ELEM_SRQ;
+	elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ);
 	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
 	if (rc)
 		goto err;

-	/* returned id is no greater than u16 */
-	srq_id = (u16)returned_id;
 	opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1782,20 +1875,34 @@ qed_rdma_create_srq(void *rdma_cxt,
 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
 	p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
 	p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
-	p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
 	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
 	p_ramrod->page_size = cpu_to_le16(in_params->page_size);
 	DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
+	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
+	srq_id = (u16)returned_id + offset;
+	p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
+
+	if (in_params->is_xrc) {
+		SET_FIELD(p_ramrod->flags,
+			  RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
+		SET_FIELD(p_ramrod->flags,
+			  RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
+			  in_params->reserved_key_en);
+		p_ramrod->xrc_srq_cq_cid =
+			cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+				    in_params->cq_cid);
+		p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
+	}

 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 	if (rc)
 		goto err;

 	out_params->srq_id = srq_id;

-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-		   "SRQ created Id = %x\n", out_params->srq_id);
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_RDMA,
+		   "XRC/SRQ created Id = %x (is_xrc=%u)\n",
+		   out_params->srq_id, in_params->is_xrc);
 	return rc;

 err:
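
A hypothetical caller sketch for the extended create path; every identifier
and value below (pbl_phys, prod_phys, my_pd, my_cq_cid, my_xrcd) is an assumed
placeholder, not qedr code.

    struct qed_rdma_create_srq_in_params in_params = {
        .pbl_base_addr   = pbl_phys,   /* assumed PBL DMA address */
        .prod_pair_addr  = prod_phys,  /* assumed producers DMA address */
        .num_pages       = 1,
        .pd_id           = my_pd,
        .page_size       = 4096,
        /* fields added by this series, used only when is_xrc is set: */
        .is_xrc          = true,
        .reserved_key_en = false,
        .cq_cid          = my_cq_cid,
        .xrcd_id         = my_xrcd,
    };

    /* With is_xrc set, the id comes from xrc_srq_map and no offset is
     * applied, so out_params.srq_id falls in [0, xrc_srq_count); regular
     * SRQs instead get ids starting at srq_id_offset.
     */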
@@ -1961,6 +2068,8 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
 	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
 	.rdma_alloc_pd = &qed_rdma_alloc_pd,
 	.rdma_dealloc_pd = &qed_rdma_free_pd,
+	.rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
+	.rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
 	.rdma_create_cq = &qed_rdma_create_cq,
 	.rdma_destroy_cq = &qed_rdma_destroy_cq,
 	.rdma_create_qp = &qed_rdma_create_qp,

...
@@ -63,6 +63,11 @@
 #define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
 #define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)

+/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
+ * SRQs is much smaller so there's no need to have that many domains.
+ */
+#define QED_RDMA_MAX_XRCDS	(roundup_pow_of_two(RDMA_MAX_XRC_SRQS))
+
 enum qed_rdma_toggle_bit {
 	QED_RDMA_TOGGLE_BIT_CLEAR = 0,
 	QED_RDMA_TOGGLE_BIT_SET = 1

@@ -81,9 +86,11 @@ struct qed_rdma_info {
 	struct qed_bmap cq_map;
 	struct qed_bmap pd_map;
+	struct qed_bmap xrcd_map;
 	struct qed_bmap tid_map;
 	struct qed_bmap qp_map;
 	struct qed_bmap srq_map;
+	struct qed_bmap xrc_srq_map;
 	struct qed_bmap cid_map;
 	struct qed_bmap tcp_cid_map;
 	struct qed_bmap real_cid_map;

@@ -111,6 +118,7 @@ struct qed_rdma_qp {
 	u32 qpid;
 	u16 icid;
 	enum qed_roce_qp_state cur_state;
+	enum qed_rdma_qp_type qp_type;
 	enum qed_iwarp_qp_state iwarp_state;
 	bool use_srq;
 	bool signal_all;
@@ -153,18 +161,21 @@ struct qed_rdma_qp {
 	dma_addr_t orq_phys_addr;
 	u8 orq_num_pages;
 	bool req_offloaded;
+	bool has_req;

 	/* responder */
 	u8 max_rd_atomic_resp;
 	u32 rq_psn;
 	u16 rq_cq_id;
 	u16 rq_num_pages;
+	u16 xrcd_id;
 	dma_addr_t rq_pbl_ptr;
 	void *irq;
 	dma_addr_t irq_phys_addr;
 	u8 irq_num_pages;
 	bool resp_offloaded;
 	u32 cq_prod;
+	bool has_resp;

 	u8 remote_mac_addr[6];
 	u8 local_mac_addr[6];

@@ -174,6 +185,14 @@ struct qed_rdma_qp {
 	struct qed_iwarp_ep *ep;
 };

+static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
+{
+	if (qp->qp_type == QED_RDMA_QP_TYPE_XRC_TGT ||
+	    qp->qp_type == QED_RDMA_QP_TYPE_XRC_INI)
+		return true;
+
+	return false;
+}
+
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

...
@@ -254,6 +254,9 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 	int rc;
 	u8 tc;

+	if (!qp->has_resp)
+		return 0;
+
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

 	/* Allocate DMA-able memory for IRQ */

@@ -315,6 +318,10 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
 		  qp->min_rnr_nak_timer);

+	SET_FIELD(p_ramrod->flags,
+		  ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
+		  qed_rdma_is_xrc_qp(qp));
+
 	p_ramrod->max_ird = qp->max_rd_atomic_resp;
 	p_ramrod->traffic_class = qp->traffic_class_tos;
 	p_ramrod->hop_limit = qp->hop_limit_ttl;

@@ -335,6 +342,7 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
 				       qp->rq_cq_id);
+	p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);

 	tc = qed_roce_get_qp_tc(p_hwfn, qp);
 	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);

@@ -395,6 +403,9 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 	int rc;
 	u8 tc;

+	if (!qp->has_req)
+		return 0;
+
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

 	/* Allocate DMA-able memory for ORQ */
@@ -444,6 +455,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
 		  qp->rnr_retry_cnt);

+	SET_FIELD(p_ramrod->flags,
+		  ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
+		  qed_rdma_is_xrc_qp(qp));
+
 	p_ramrod->max_ord = qp->max_rd_atomic_req;
 	p_ramrod->traffic_class = qp->traffic_class_tos;
 	p_ramrod->hop_limit = qp->hop_limit_ttl;

@@ -517,6 +532,9 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
 	struct qed_spq_entry *p_ent;
 	int rc;

+	if (!qp->has_resp)
+		return 0;
+
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

 	if (move_to_err && !qp->resp_offloaded)

@@ -611,6 +629,9 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
 	struct qed_spq_entry *p_ent;
 	int rc;

+	if (!qp->has_req)
+		return 0;
+
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

 	if (move_to_err && !(qp->req_offloaded))

@@ -705,6 +726,11 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 	dma_addr_t ramrod_res_phys;
 	int rc;

+	if (!qp->has_resp) {
+		*cq_prod = 0;
+		return 0;
+	}
+
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

 	*cq_prod = qp->cq_prod;

@@ -785,6 +811,9 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
 	dma_addr_t ramrod_res_phys;
 	int rc = -ENOMEM;

+	if (!qp->has_req)
+		return 0;
+
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

 	if (!qp->req_offloaded)

...
@@ -53,6 +53,13 @@ enum qed_roce_qp_state {
 	QED_ROCE_QP_STATE_SQE
 };

+enum qed_rdma_qp_type {
+	QED_RDMA_QP_TYPE_RC,
+	QED_RDMA_QP_TYPE_XRC_INI,
+	QED_RDMA_QP_TYPE_XRC_TGT,
+	QED_RDMA_QP_TYPE_INVAL = 0xffff,
+};
+
 enum qed_rdma_tid_type {
 	QED_RDMA_TID_REGISTERED_MR,
 	QED_RDMA_TID_FMR,
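
For orientation (an assumption about the consumer, not part of this diff),
these values line up one-to-one with the verbs QP types, with the translation
expected to live in the upper-layer driver:

    /* Presumed qedr-side mapping (illustrative):
     *   IB_QPT_RC      -> QED_RDMA_QP_TYPE_RC
     *   IB_QPT_XRC_INI -> QED_RDMA_QP_TYPE_XRC_INI
     *   IB_QPT_XRC_TGT -> QED_RDMA_QP_TYPE_XRC_TGT
     */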
@@ -291,6 +298,12 @@ struct qed_rdma_create_srq_in_params {
 	u16 num_pages;
 	u16 pd_id;
 	u16 page_size;
+
+	/* XRC related only */
+	bool reserved_key_en;
+	bool is_xrc;
+	u32 cq_cid;
+	u16 xrcd_id;
 };

 struct qed_rdma_destroy_cq_in_params {

@@ -319,7 +332,9 @@ struct qed_rdma_create_qp_in_params {
 	u16 rq_num_pages;
 	u64 rq_pbl_ptr;
 	u16 srq_id;
+	u16 xrcd_id;
 	u8 stats_queue;
+	enum qed_rdma_qp_type qp_type;
 };

 struct qed_rdma_create_qp_out_params {

@@ -429,11 +444,13 @@ struct qed_rdma_create_srq_out_params {

 struct qed_rdma_destroy_srq_in_params {
 	u16 srq_id;
+	bool is_xrc;
 };

 struct qed_rdma_modify_srq_in_params {
 	u32 wqe_limit;
 	u16 srq_id;
+	bool is_xrc;
 };

 struct qed_rdma_stats_out_params {

@@ -611,6 +628,8 @@ struct qed_rdma_ops {
 	int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
 	int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
 	void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
+	int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd);
+	void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd);
 	int (*rdma_create_cq)(void *rdma_cxt,
 			      struct qed_rdma_create_cq_in_params *params,
 			      u16 *icid);

...