Commit 354462eb authored by Guoqing Jiang, committed by Jason Gunthorpe

RDMA/rtrs: Rename cq_size/queue_size to cq_num/queue_num

These variables are passed to create_cq, create_qp, rtrs_iu_alloc and
rtrs_iu_free, where the *_size suffix is misleading: each value is a
number of units, and cq_size in particular is the number of CQ elements.

Also move the setting of cq_num to the common path.
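
For illustration, a simplified sketch of the rtrs_iu_alloc() pattern
(condensed from the rtrs.c hunk below, using the header's queue_num
name) shows why *_num fits: the renamed parameter is an element count
that sizes a kcalloc() array and bounds a loop, while the separate
size argument carries the per-element byte size.

	/* queue_num counts IUs to allocate; size is the byte size of each buffer. */
	ius = kcalloc(queue_num, sizeof(*ius), gfp_mask);
	if (!ius)
		return NULL;
	for (i = 0; i < queue_num; i++)
		ius[i].buf = kzalloc(size, gfp_mask);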

Link: https://lore.kernel.org/r/20210614090337.29557-5-jinpu.wang@ionos.com
Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Reviewed-by: Md Haris Iqbal <haris.iqbal@cloud.ionos.com>
Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b012f0ad
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1572,7 +1572,7 @@ static void destroy_con(struct rtrs_clt_con *con)
 static int create_con_cq_qp(struct rtrs_clt_con *con)
 {
 	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
-	u32 max_send_wr, max_recv_wr, cq_size, max_send_sge;
+	u32 max_send_wr, max_recv_wr, cq_num, max_send_sge;
 	int err, cq_vector;
 	struct rtrs_msg_rkey_rsp *rsp;
@@ -1628,26 +1628,26 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
 				    sess->queue_depth * 3 + 1);
 		max_send_sge = sess->clt->max_segments + 1;
 	}
-	cq_size = max_send_wr + max_recv_wr;
+	cq_num = max_send_wr + max_recv_wr;
 	/* alloc iu to recv new rkey reply when server reports flags set */
 	if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
-		con->rsp_ius = rtrs_iu_alloc(cq_size, sizeof(*rsp),
+		con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
 					     GFP_KERNEL, sess->s.dev->ib_dev,
 					     DMA_FROM_DEVICE,
 					     rtrs_clt_rdma_done);
 		if (!con->rsp_ius)
 			return -ENOMEM;
-		con->queue_size = cq_size;
+		con->queue_num = cq_num;
 	}
-	cq_size = max_send_wr + max_recv_wr;
+	cq_num = max_send_wr + max_recv_wr;
 	cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
 	if (con->c.cid >= sess->s.irq_con_num)
 		err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
-					cq_vector, cq_size, max_send_wr,
+					cq_vector, cq_num, max_send_wr,
 					max_recv_wr, IB_POLL_DIRECT);
 	else
 		err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
-					cq_vector, cq_size, max_send_wr,
+					cq_vector, cq_num, max_send_wr,
 					max_recv_wr, IB_POLL_SOFTIRQ);
 	/*
 	 * In case of error we do not bother to clean previous allocations,
@@ -1667,9 +1667,9 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con)
 	lockdep_assert_held(&con->con_mutex);
 	rtrs_cq_qp_destroy(&con->c);
 	if (con->rsp_ius) {
-		rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
+		rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num);
 		con->rsp_ius = NULL;
-		con->queue_size = 0;
+		con->queue_num = 0;
 	}
 	if (sess->s.dev_ref && !--sess->s.dev_ref) {
 		rtrs_ib_dev_put(sess->s.dev);
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -71,7 +71,7 @@ struct rtrs_clt_stats {
 struct rtrs_clt_con {
 	struct rtrs_con	c;
 	struct rtrs_iu	*rsp_ius;
-	u32		queue_size;
+	u32		queue_num;
 	unsigned int	cpu;
 	struct mutex	con_mutex;
 	atomic_t	io_cnt;
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -95,7 +95,7 @@ struct rtrs_con {
 	struct ib_cq		*cq;
 	struct rdma_cm_id	*cm_id;
 	unsigned int		cid;
-	u16			cq_size;
+	int			nr_cqe;
 };

 struct rtrs_sess {
@@ -294,10 +294,10 @@ struct rtrs_msg_rdma_hdr {

 /* rtrs.c */

-struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t t,
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_num, size_t size, gfp_t t,
 			      struct ib_device *dev, enum dma_data_direction,
 			      void (*done)(struct ib_cq *cq, struct ib_wc *wc));
-void rtrs_iu_free(struct rtrs_iu *iu, struct ib_device *dev, u32 queue_size);
+void rtrs_iu_free(struct rtrs_iu *iu, struct ib_device *dev, u32 queue_num);
 int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
 int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
 		      struct ib_send_wr *head);
@@ -312,8 +312,8 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
 				   u32 imm_data, enum ib_send_flags flags,
 				   struct ib_send_wr *head);

-int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
-		      u32 max_send_sge, int cq_vector, int cq_size,
+int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+		      u32 max_send_sge, int cq_vector, int nr_cqe,
 		      u32 max_send_wr, u32 max_recv_wr,
 		      enum ib_poll_context poll_ctx);
 void rtrs_cq_qp_destroy(struct rtrs_con *con);
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -1634,7 +1634,7 @@ static int create_con(struct rtrs_srv_sess *sess,
 	struct rtrs_sess *s = &sess->s;
 	struct rtrs_srv_con *con;
-	u32 cq_size, max_send_wr, max_recv_wr, wr_limit;
+	u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
 	int err, cq_vector;

 	con = kzalloc(sizeof(*con), GFP_KERNEL);
@@ -1657,7 +1657,6 @@ static int create_con(struct rtrs_srv_sess *sess,
 		 */
 		max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
 		max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
-		cq_size = max_send_wr + max_recv_wr;
 	} else {
 		/*
 		 * In theory we might have queue_depth * 32
@@ -1683,13 +1682,13 @@ static int create_con(struct rtrs_srv_sess *sess,
 		 * requires an invalidate request + drain
 		 * and qp gets into error state.
 		 */
-		cq_size = max_send_wr + max_recv_wr;
 	}
+	cq_num = max_send_wr + max_recv_wr;
 	atomic_set(&con->sq_wr_avail, max_send_wr);
 	cq_vector = rtrs_srv_get_next_cq_vector(sess);

 	/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
-	err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
+	err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_num,
 				max_send_wr, max_recv_wr,
 				IB_POLL_WORKQUEUE);
 	if (err) {
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -18,7 +18,7 @@
 MODULE_DESCRIPTION("RDMA Transport Core");
 MODULE_LICENSE("GPL");

-struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
+struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
 			      struct ib_device *dma_dev,
 			      enum dma_data_direction dir,
 			      void (*done)(struct ib_cq *cq, struct ib_wc *wc))
@@ -26,10 +26,10 @@ struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
 	struct rtrs_iu *ius, *iu;
 	int i;

-	ius = kcalloc(queue_size, sizeof(*ius), gfp_mask);
+	ius = kcalloc(iu_num, sizeof(*ius), gfp_mask);
 	if (!ius)
 		return NULL;
-	for (i = 0; i < queue_size; i++) {
+	for (i = 0; i < iu_num; i++) {
 		iu = &ius[i];
 		iu->direction = dir;
 		iu->buf = kzalloc(size, gfp_mask);
@@ -50,7 +50,7 @@ struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL_GPL(rtrs_iu_alloc);

-void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_size)
+void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_num)
 {
 	struct rtrs_iu *iu;
 	int i;
@@ -58,7 +58,7 @@ void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_size)
 	if (!ius)
 		return;

-	for (i = 0; i < queue_size; i++) {
+	for (i = 0; i < queue_num; i++) {
 		iu = &ius[i];
 		ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, iu->direction);
 		kfree(iu->buf);
@@ -212,20 +212,20 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
 	}
 }

-static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
+static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
 		     enum ib_poll_context poll_ctx)
 {
 	struct rdma_cm_id *cm_id = con->cm_id;
 	struct ib_cq *cq;

-	cq = ib_cq_pool_get(cm_id->device, cq_size, cq_vector, poll_ctx);
+	cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
 	if (IS_ERR(cq)) {
 		rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
 			 PTR_ERR(cq));
 		return PTR_ERR(cq);
 	}
 	con->cq = cq;
-	con->cq_size = cq_size;
+	con->nr_cqe = nr_cqe;

 	return 0;
 }
@@ -260,20 +260,20 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
 }

 int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
-		      u32 max_send_sge, int cq_vector, int cq_size,
+		      u32 max_send_sge, int cq_vector, int nr_cqe,
 		      u32 max_send_wr, u32 max_recv_wr,
 		      enum ib_poll_context poll_ctx)
 {
 	int err;

-	err = create_cq(con, cq_vector, cq_size, poll_ctx);
+	err = create_cq(con, cq_vector, nr_cqe, poll_ctx);
 	if (err)
 		return err;
 	err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
 			max_send_sge);
 	if (err) {
-		ib_cq_pool_put(con->cq, con->cq_size);
+		ib_cq_pool_put(con->cq, con->nr_cqe);
 		con->cq = NULL;
 		return err;
 	}
@@ -290,7 +290,7 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
 		con->qp = NULL;
 	}
 	if (con->cq) {
-		ib_cq_pool_put(con->cq, con->cq_size);
+		ib_cq_pool_put(con->cq, con->nr_cqe);
 		con->cq = NULL;
 	}
 }
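
A note on the nr_cqe bookkeeping above: the CQ pool API is symmetric,
so the CQE count passed to ib_cq_pool_get() must be handed back to
ib_cq_pool_put(), which is why create_cq() stores it in con->nr_cqe.
A minimal sketch of the pairing (illustrative; error handling trimmed):

	/* One CQE is budgeted per posted send/recv work request. */
	int nr_cqe = max_send_wr + max_recv_wr;

	con->cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
	con->nr_cqe = nr_cqe;	/* remembered for the release path */

	/* Later, on QP-create failure or teardown: */
	ib_cq_pool_put(con->cq, con->nr_cqe);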