Commit 3a849872 authored by Sindhu Devale, committed by Leon Romanovsky

RDMA/irdma: Allow accurate reporting on QP max send/recv WR

Currently the attributes cap.max_send_wr and cap.max_recv_wr
sent from user-space during create QP are the provider-computed
SQ/RQ depths, as opposed to the raw values passed by the application.
This prevents the kernel from computing accurate values for
max_send_wr and max_recv_wr for this QP that match the values
returned by user-space create QP. Also, these capabilities need to
be reported by the driver in query QP.
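
As a rough illustration from the application side (pd, cq and the
numeric values below are hypothetical, not part of this patch), the
mismatch is between what ibv_create_qp is asked for and what the
kernel could previously report back:

	#include <infiniband/verbs.h>

	/* The application asks for 100 WRs; before this change the kernel
	 * only knew the provider-computed ring depth (e.g. 128), so a
	 * kernel-side query of the QP could not report the 100 that the
	 * user-space create QP returned. */
	struct ibv_qp_init_attr init_attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.qp_type = IBV_QPT_RC,
		.cap = {
			.max_send_wr  = 100,	/* raw application request */
			.max_recv_wr  = 100,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &init_attr);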

Add support by extending the ABI to allow the raw cap.max_send_wr and
cap.max_recv_wr to be passed from user-space, while keeping
compatibility with the older scheme.
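
A minimal sketch of the user-space half of that handshake (only the
comp_mask field and the IRDMA_ALLOC_UCTX_USE_RAW_ATTR flag come from
this patch; everything else here is illustrative):

	#include <rdma/irdma-abi.h>

	/* A provider that understands the new scheme advertises it in
	 * comp_mask when allocating the ucontext; an older provider sends
	 * a shorter request with no comp_mask, so the kernel keeps the
	 * legacy behaviour of receiving provider-computed SQ/RQ depths. */
	struct irdma_alloc_ucontext_req req = {};

	req.userspace_ver = 5;				/* illustrative ABI version */
	req.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;	/* raw WR counts will follow */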

The internal HW depth and shift needed for the WQs now need to be
computed for both kernel-mode and user-mode QPs. Add new helpers to
assist with this: irdma_uk_calc_depth_shift_sq,
irdma_uk_calc_depth_shift_rq and irdma_uk_calc_shift_wq.
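
A sketch of how the new depth/shift helpers fit together (the wrapper
name below is made up for illustration; the helper signatures and the
sq_depth/rq_depth/sq_shift/rq_shift fields are the ones added by this
patch):

	/* Hypothetical wrapper, for illustration only. */
	static int example_calc_wq_geometry(struct irdma_qp_uk_init_info *ukinfo)
	{
		int err;

		/* derive the HW SQ ring depth and per-WQE size shift from the
		 * requested sq_size, fragment count and inline data size */
		err = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
						   &ukinfo->sq_shift);
		if (err)
			return err;

		/* same for the RQ */
		err = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
						   &ukinfo->rq_shift);
		if (err)
			return err;

		/* irdma_uk_qp_init() then consumes info->sq_shift and
		 * info->rq_shift instead of recomputing the shifts itself */
		return 0;
	}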

Consolidate all of the user-mode QP setup into a new function,
irdma_setup_umode_qp, which keeps it alongside its counterpart
irdma_setup_kmode_qp.
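
The body of irdma_setup_umode_qp is in a part of the diff that is
collapsed below, so the following is only an assumed outline of how
the two schemes can be kept apart there (function and variable names
are hypothetical):

	/* Assumed outline, not the actual irdma_setup_umode_qp body. */
	static int example_umode_wq_setup(struct irdma_ucontext *ucontext,
					  struct irdma_qp_uk_init_info *ukinfo,
					  struct ib_qp_init_attr *init_attr)
	{
		int err = 0;

		if (ucontext->use_raw_attrs) {
			/* new scheme: user space passed raw WR counts, so the
			 * HW depth and shift are computed in the kernel */
			err = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
							   &ukinfo->sq_shift);
			if (!err)
				err = irdma_uk_calc_depth_shift_rq(ukinfo,
								   &ukinfo->rq_depth,
								   &ukinfo->rq_shift);
		} else {
			/* legacy scheme: user space already passed provider-computed
			 * depths, so only the WQE shifts remain to be derived */
			irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
					       &ukinfo->rq_shift);
			ukinfo->sq_depth = init_attr->cap.max_send_wr;
			ukinfo->rq_depth = init_attr->cap.max_recv_wr;
		}

		return err;
	}
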
Signed-off-by: Youvaraj Sagar <youvaraj.sagar@intel.com>
Signed-off-by: Sindhu Devale <sindhu.devale@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Link: https://lore.kernel.org/r/20230725155525.1081-2-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent cb06b6b3
@@ -1414,6 +1414,78 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
 }
 
+/**
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
+ * @ukinfo: qp initialization info
+ * @sq_shift: Returns shift of SQ
+ * @rq_shift: Returns shift of RQ
+ */
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+			    u8 *rq_shift)
+{
+	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+
+	irdma_get_wqe_shift(ukinfo->uk_attrs,
+			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+					  ukinfo->max_sq_frag_cnt,
+			    ukinfo->max_inline_data, sq_shift);
+	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+			    rq_shift);
+	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+		if (ukinfo->abi_ver > 4)
+			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+	}
+}
+
+/**
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
+ * @ukinfo: qp initialization info
+ * @sq_depth: Returns depth of SQ
+ * @sq_shift: Returns shift of SQ
+ */
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+				 u32 *sq_depth, u8 *sq_shift)
+{
+	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+	int status;
+
+	irdma_get_wqe_shift(ukinfo->uk_attrs,
+			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+					  ukinfo->max_sq_frag_cnt,
+			    ukinfo->max_inline_data, sq_shift);
+	status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
+				   *sq_shift, sq_depth);
+
+	return status;
+}
+
+/**
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
+ * @ukinfo: qp initialization info
+ * @rq_depth: Returns depth of RQ
+ * @rq_shift: Returns shift of RQ
+ */
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+				 u32 *rq_depth, u8 *rq_shift)
+{
+	int status;
+
+	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+			    rq_shift);
+	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+		if (ukinfo->abi_ver > 4)
+			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+	}
+	status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
+				   *rq_shift, rq_depth);
+
+	return status;
+}
+
 /**
  * irdma_uk_qp_init - initialize shared qp
  * @qp: hw qp (user and kernel)
@@ -1428,23 +1500,12 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
 {
 	int ret_code = 0;
 	u32 sq_ring_size;
-	u8 sqshift, rqshift;
 
 	qp->uk_attrs = info->uk_attrs;
 	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
 	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
 		return -EINVAL;
 
-	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
-	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
-		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
-				    info->max_inline_data, &sqshift);
-		if (info->abi_ver > 4)
-			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
-	} else {
-		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
-				    info->max_inline_data, &sqshift);
-	}
 	qp->qp_caps = info->qp_caps;
 	qp->sq_base = info->sq;
 	qp->rq_base = info->rq;
@@ -1458,7 +1519,7 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
 	qp->sq_size = info->sq_size;
 	qp->push_mode = false;
 	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
-	sq_ring_size = qp->sq_size << sqshift;
+	sq_ring_size = qp->sq_size << info->sq_shift;
 	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
 	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
 	if (info->first_sq_wq) {
@@ -1473,9 +1534,9 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
 	qp->rq_size = info->rq_size;
 	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
 	qp->max_inline_data = info->max_inline_data;
-	qp->rq_wqe_size = rqshift;
+	qp->rq_wqe_size = info->rq_shift;
 	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
-	qp->rq_wqe_size_multiplier = 1 << rqshift;
+	qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
 	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
 		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
 	else
@@ -295,6 +295,12 @@ void irdma_uk_cq_init(struct irdma_cq_uk *cq,
 		      struct irdma_cq_uk_init_info *info);
 int irdma_uk_qp_init(struct irdma_qp_uk *qp,
 		     struct irdma_qp_uk_init_info *info);
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+			    u8 *rq_shift);
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+				 u32 *sq_depth, u8 *sq_shift);
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+				 u32 *rq_depth, u8 *rq_shift);
 struct irdma_sq_uk_wr_trk_info {
 	u64 wrid;
 	u32 wr_len;
@@ -374,8 +380,12 @@ struct irdma_qp_uk_init_info {
 	u32 max_sq_frag_cnt;
 	u32 max_rq_frag_cnt;
 	u32 max_inline_data;
+	u32 sq_depth;
+	u32 rq_depth;
 	u8 first_sq_wq;
 	u8 type;
+	u8 sq_shift;
+	u8 rq_shift;
 	int abi_ver;
 	bool legacy_mode;
 };
This diff is collapsed.
@@ -18,7 +18,8 @@ struct irdma_ucontext {
 	struct list_head qp_reg_mem_list;
 	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
 	int abi_ver;
-	bool legacy_mode;
+	u8 legacy_mode : 1;
+	u8 use_raw_attrs : 1;
 };
 
 struct irdma_pd {
@@ -22,10 +22,15 @@ enum irdma_memreg_type {
 	IRDMA_MEMREG_TYPE_CQ = 2,
 };
 
+enum {
+	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+};
+
 struct irdma_alloc_ucontext_req {
 	__u32 rsvd32;
 	__u8 userspace_ver;
 	__u8 rsvd8[3];
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_alloc_ucontext_resp {
@@ -46,6 +51,7 @@ struct irdma_alloc_ucontext_resp {
 	__u16 max_hw_sq_chunk;
 	__u8 hw_rev;
 	__u8 rsvd2;
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_alloc_pd_resp {
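
For completeness, a sketch of the kernel side of the ucontext handshake
implied by the uapi change above (the handler below is an assumption
about the surrounding driver code, not part of this hunk;
ib_copy_from_udata is the standard RDMA core copy helper):

	/* Assumed fragment of the ucontext allocation path. */
	static int example_parse_ucontext_req(struct irdma_ucontext *ucontext,
					      struct ib_udata *udata)
	{
		struct irdma_alloc_ucontext_req req = {};

		if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
			return -EINVAL;

		/* providers built against the older ABI send a shorter request
		 * with no comp_mask, so they transparently keep the legacy
		 * behaviour */
		if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
			ucontext->use_raw_attrs = true;

		return 0;
	}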