Commit da2924bd authored by Gal Pressman's avatar Gal Pressman Committed by Jason Gunthorpe

RDMA/efa: Expose minimum SQ size

The device reports the minimum SQ size required for creation.

This patch queries the min SQ size and reports it back to the userspace
library.

Link: https://lore.kernel.org/r/20200722140312.3651-3-galpress@amazon.com
Reviewed-by: default avatarFiras JahJah <firasj@amazon.com>
Reviewed-by: default avatarShadi Ammouri <sammouri@amazon.com>
Signed-off-by: default avatarGal Pressman <galpress@amazon.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 556c811f
...@@ -606,8 +606,8 @@ struct efa_admin_feature_queue_attr_desc { ...@@ -606,8 +606,8 @@ struct efa_admin_feature_queue_attr_desc {
/* Number of sub-CQs to be created for each CQ */ /* Number of sub-CQs to be created for each CQ */
u16 sub_cqs_per_cq; u16 sub_cqs_per_cq;
/* MBZ */ /* Minimum number of WQEs per SQ */
u16 reserved; u16 min_sq_depth;
/* Maximum number of SGEs (buffers) allowed for a single send WQE */ /* Maximum number of SGEs (buffers) allowed for a single send WQE */
u16 max_wr_send_sges; u16 max_wr_send_sges;
......
...@@ -481,6 +481,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, ...@@ -481,6 +481,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq; result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges; result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
result->max_tx_batch = resp.u.queue_attr.max_tx_batch; result->max_tx_batch = resp.u.queue_attr.max_tx_batch;
result->min_sq_depth = resp.u.queue_attr.min_sq_depth;
err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR); err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
if (err) { if (err) {
......
...@@ -128,6 +128,7 @@ struct efa_com_get_device_attr_result { ...@@ -128,6 +128,7 @@ struct efa_com_get_device_attr_result {
u16 max_rq_sge; u16 max_rq_sge;
u16 max_wr_rdma_sge; u16 max_wr_rdma_sge;
u16 max_tx_batch; u16 max_tx_batch;
u16 min_sq_depth;
u8 db_bar; u8 db_bar;
}; };
......
...@@ -1526,6 +1526,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata) ...@@ -1526,6 +1526,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
resp.inline_buf_size = dev->dev_attr.inline_buf_size; resp.inline_buf_size = dev->dev_attr.inline_buf_size;
resp.max_llq_size = dev->dev_attr.max_llq_size; resp.max_llq_size = dev->dev_attr.max_llq_size;
resp.max_tx_batch = dev->dev_attr.max_tx_batch; resp.max_tx_batch = dev->dev_attr.max_tx_batch;
resp.min_sq_wr = dev->dev_attr.min_sq_depth;
if (udata && udata->outlen) { if (udata && udata->outlen) {
err = ib_copy_to_udata(udata, &resp, err = ib_copy_to_udata(udata, &resp,
......
...@@ -32,7 +32,8 @@ struct efa_ibv_alloc_ucontext_resp { ...@@ -32,7 +32,8 @@ struct efa_ibv_alloc_ucontext_resp {
__u16 inline_buf_size; __u16 inline_buf_size;
__u32 max_llq_size; /* bytes */ __u32 max_llq_size; /* bytes */
__u16 max_tx_batch; /* units of 64 bytes */ __u16 max_tx_batch; /* units of 64 bytes */
__u8 reserved_90[6]; __u16 min_sq_wr;
__u8 reserved_a0[4];
}; };
struct efa_ibv_alloc_pd_resp { struct efa_ibv_alloc_pd_resp {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment