Commit 0fee4516 authored by Wenpeng Liang, committed by Jason Gunthorpe

RDMA/hns: Refactor hns_roce_create_srq()

Split the SRQ creation process into multiple steps and encapsulate them
into functions.
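
In outline, the refactored hns_roce_create_srq() reduces to three named
steps (condensed from the diff below; the udata response copy and final
bookkeeping are summarized in comments, and error unwinding is trimmed):

	int hns_roce_create_srq(struct ib_srq *ib_srq,
				struct ib_srq_init_attr *init_attr,
				struct ib_udata *udata)
	{
		struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
		struct hns_roce_srq *srq = to_hr_srq(ib_srq);
		int ret;

		/* step 1: validate attrs, derive wqe_cnt/max_gs and the cqn */
		ret = set_srq_param(srq, init_attr, udata);
		if (ret)
			return ret;

		/* step 2: idx queue, WQE buffer and (kernel-only) wrid array */
		ret = alloc_srq_buf(hr_dev, srq, udata);
		if (ret)
			return ret;

		/* step 3: mailbox, hw->write_srqc() and the CREATE_SRQ command */
		ret = alloc_srqc(hr_dev, srq);
		if (ret)
			goto err_srq_buf;

		/* then: copy srqn back via udata, set db_reg_l/event/refcount */
		return 0;

	err_srq_buf:
		free_srq_buf(hr_dev, srq);
		return ret;
	}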

Link: https://lore.kernel.org/r/1611997090-48820-7-git-send-email-liweihang@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 6ee00fbf
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -506,6 +506,7 @@ struct hns_roce_srq {
 	int			max_gs;
 	u32			rsv_sge;
 	int			wqe_shift;
+	u32			cqn;
 	void __iomem		*db_reg_l;
 
 	atomic_t		refcount;
@@ -953,8 +954,8 @@ struct hns_roce_hw {
 	int (*init_eq)(struct hns_roce_dev *hr_dev);
 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
 	void (*write_srqc)(struct hns_roce_dev *hr_dev,
-			   struct hns_roce_srq *srq, u32 pdn, u16 xrcd, u32 cqn,
-			   void *mb_buf, u64 *mtts_wqe, u64 *mtts_idx,
+			   struct hns_roce_srq *srq, void *mb_buf,
+			   u64 *mtts_wqe, u64 *mtts_idx,
 			   dma_addr_t dma_handle_wqe,
 			   dma_addr_t dma_handle_idx);
 	int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5228,9 +5228,9 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
 }
 
 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
-				   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
-				   u32 cqn, void *mb_buf, u64 *mtts_wqe,
-				   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
+				   struct hns_roce_srq *srq, void *mb_buf,
+				   u64 *mtts_wqe, u64 *mtts_idx,
+				   dma_addr_t dma_handle_wqe,
 				   dma_addr_t dma_handle_idx)
 {
 	struct hns_roce_srq_context *srq_context;
@@ -5257,7 +5257,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
 		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
 
 	roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
-		       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
+		       SRQC_BYTE_12_SRQ_XRCD_S, 0);
 
 	srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
 
@@ -5267,7 +5267,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
 		       dma_handle_wqe >> 35);
 
 	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
-		       SRQC_BYTE_28_PD_S, pdn);
+		       SRQC_BYTE_28_PD_S, to_hr_pd(srq->ibsrq.pd)->pdn);
 	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
 		       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
 		       fls(srq->max_gs - 1));
@@ -5307,7 +5307,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
 		       upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
 	roce_set_field(srq_context->byte_56_xrc_cqn,
 		       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
-		       cqn);
+		       srq->cqn);
 	roce_set_field(srq_context->byte_56_xrc_cqn,
 		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
 		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -77,8 +77,7 @@ static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
 				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
-static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
-		      u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 {
 	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
 	struct ib_device *ibdev = &hr_dev->ib_dev;
@@ -133,9 +132,8 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 		goto err_xa;
 	}
 
-	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
-			       mtts_wqe, mtts_idx, dma_handle_wqe,
-			       dma_handle_idx);
+	hr_dev->hw->write_srqc(hr_dev, srq, mailbox->buf, mtts_wqe, mtts_idx,
+			       dma_handle_wqe, dma_handle_idx);
 
 	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -144,9 +142,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 		goto err_xa;
 	}
 
-	atomic_set(&srq->refcount, 1);
-	init_completion(&srq->free);
-	return ret;
+	return 0;
 
 err_xa:
 	xa_erase(&srq_table->xa, srq->srqn);
@@ -179,45 +175,13 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
 }
 
-static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
-			 struct ib_udata *udata, unsigned long addr)
-{
-	struct ib_device *ibdev = &hr_dev->ib_dev;
-	struct hns_roce_buf_attr buf_attr = {};
-	int err;
-
-	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
-						      HNS_ROCE_SGE_SIZE *
-						      srq->max_gs)));
-
-	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
-	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
-							 srq->wqe_shift);
-	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
-	buf_attr.region_count = 1;
-
-	err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
-				  hr_dev->caps.srqwqe_ba_pg_sz +
-				  HNS_HW_PAGE_SHIFT, udata, addr);
-	if (err)
-		ibdev_err(ibdev,
-			  "failed to alloc SRQ buf mtr, ret = %d.\n", err);
-
-	return err;
-}
-
-static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
-{
-	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
-}
-
 static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 			 struct ib_udata *udata, unsigned long addr)
 {
 	struct hns_roce_idx_que *idx_que = &srq->idx_que;
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_attr buf_attr = {};
-	int err;
+	int ret;
 
 	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
 
@@ -227,20 +191,20 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
 	buf_attr.region_count = 1;
 
-	err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
-				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
-				  udata, addr);
-	if (err) {
+	ret = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
+				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+				  udata, addr);
+	if (ret) {
 		ibdev_err(ibdev,
-			  "failed to alloc SRQ idx mtr, ret = %d.\n", err);
-		return err;
+			  "failed to alloc SRQ idx mtr, ret = %d.\n", ret);
+		return ret;
 	}
 
 	if (!udata) {
 		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
 		if (!idx_que->bitmap) {
 			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
-			err = -ENOMEM;
+			ret = -ENOMEM;
 			goto err_idx_mtr;
 		}
 	}
@@ -252,7 +216,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 err_idx_mtr:
 	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
 
-	return err;
+	return ret;
 }
 
 static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
@@ -264,6 +228,40 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
 }
 
+static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+			     struct hns_roce_srq *srq,
+			     struct ib_udata *udata, unsigned long addr)
+{
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	struct hns_roce_buf_attr buf_attr = {};
+	int ret;
+
+	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+						      HNS_ROCE_SGE_SIZE *
+						      srq->max_gs)));
+
+	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+							 srq->wqe_shift);
+	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
+	buf_attr.region_count = 1;
+
+	ret = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
+				  hr_dev->caps.srqwqe_ba_pg_sz +
+				  HNS_HW_PAGE_SHIFT, udata, addr);
+	if (ret)
+		ibdev_err(ibdev,
+			  "failed to alloc SRQ buf mtr, ret = %d.\n", ret);
+
+	return ret;
+}
+
+static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+			     struct hns_roce_srq *srq)
+{
+	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
+}
+
 static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 {
 	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
@@ -301,110 +299,149 @@ static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
 	return max_sge;
 }
 
-int hns_roce_create_srq(struct ib_srq *ib_srq,
-			struct ib_srq_init_attr *init_attr,
-			struct ib_udata *udata)
-{
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
-	struct hns_roce_ib_create_srq_resp resp = {};
-	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
-	struct ib_device *ibdev = &hr_dev->ib_dev;
-	struct hns_roce_ib_create_srq ucmd = {};
-	u32 max_sge;
-	int ret;
-	u32 cqn;
-
-	if (init_attr->srq_type != IB_SRQT_BASIC &&
-	    init_attr->srq_type != IB_SRQT_XRC)
-		return -EOPNOTSUPP;
-
-	max_sge = proc_srq_sge(hr_dev, srq, !!udata);
-
-	if (init_attr->attr.max_wr > hr_dev->caps.max_srq_wrs ||
-	    init_attr->attr.max_sge > max_sge) {
-		ibdev_err(&hr_dev->ib_dev,
-			  "SRQ config error, depth = %u, sge = %d\n",
-			  init_attr->attr.max_wr, init_attr->attr.max_sge);
-		return -EINVAL;
-	}
-
-	mutex_init(&srq->mutex);
-	spin_lock_init(&srq->lock);
-
-	init_attr->attr.max_wr = max_t(u32, init_attr->attr.max_wr,
-				       HNS_ROCE_MIN_SRQ_WQE_NUM);
-	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr);
-	srq->max_gs =
-		roundup_pow_of_two(init_attr->attr.max_sge + srq->rsv_sge);
-	init_attr->attr.max_wr = srq->wqe_cnt;
-	init_attr->attr.max_sge = srq->max_gs;
-	init_attr->attr.srq_limit = 0;
-
-	if (udata) {
-		ret = ib_copy_from_udata(&ucmd, udata,
-					 min(udata->inlen, sizeof(ucmd)));
-		if (ret) {
-			ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
-				  ret);
-			return ret;
-		}
-	}
-
-	ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
-	if (ret) {
-		ibdev_err(ibdev,
-			  "failed to alloc SRQ buffer, ret = %d.\n", ret);
-		return ret;
-	}
-
-	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
-	if (ret) {
-		ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
-		goto err_buf_alloc;
-	}
-
-	if (!udata) {
-		ret = alloc_srq_wrid(hr_dev, srq);
-		if (ret) {
-			ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
-				  ret);
-			goto err_idx_alloc;
-		}
-	}
-
-	cqn = ib_srq_has_cq(init_attr->srq_type) ?
-	      to_hr_cq(init_attr->ext.cq)->cqn : 0;
-	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
-
-	ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
-	if (ret) {
-		ibdev_err(ibdev,
-			  "failed to alloc SRQ context, ret = %d.\n", ret);
-		goto err_wrid_alloc;
-	}
-
-	srq->event = hns_roce_ib_srq_event;
-	resp.srqn = srq->srqn;
-	srq->max_gs = init_attr->attr.max_sge;
-	init_attr->attr.max_sge = srq->max_gs - srq->rsv_sge;
-
-	if (udata) {
-		ret = ib_copy_to_udata(udata, &resp,
-				       min(udata->outlen, sizeof(resp)));
-		if (ret)
-			goto err_srqc_alloc;
-	}
-
-	return 0;
-
-err_srqc_alloc:
-	free_srqc(hr_dev, srq);
-err_wrid_alloc:
-	free_srq_wrid(srq);
-err_idx_alloc:
-	free_srq_idx(hr_dev, srq);
-err_buf_alloc:
-	free_srq_buf(hr_dev, srq);
-	return ret;
-}
+static int set_srq_basic_param(struct hns_roce_srq *srq,
+			       struct ib_srq_init_attr *init_attr,
+			       struct ib_udata *udata)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+	struct ib_srq_attr *attr = &init_attr->attr;
+	u32 max_sge;
+
+	max_sge = proc_srq_sge(hr_dev, srq, !!udata);
+	if (attr->max_wr > hr_dev->caps.max_srq_wrs ||
+	    attr->max_sge > max_sge) {
+		ibdev_err(&hr_dev->ib_dev,
+			  "invalid SRQ attr, depth = %u, sge = %u.\n",
+			  attr->max_wr, attr->max_sge);
+		return -EINVAL;
+	}
+
+	attr->max_wr = max_t(u32, attr->max_wr, HNS_ROCE_MIN_SRQ_WQE_NUM);
+	srq->wqe_cnt = roundup_pow_of_two(attr->max_wr);
+	srq->max_gs = roundup_pow_of_two(attr->max_sge + srq->rsv_sge);
+
+	attr->max_wr = srq->wqe_cnt;
+	attr->max_sge = srq->max_gs - srq->rsv_sge;
+	attr->srq_limit = 0;
+
+	return 0;
+}
+
+static void set_srq_ext_param(struct hns_roce_srq *srq,
+			      struct ib_srq_init_attr *init_attr)
+{
+	srq->cqn = ib_srq_has_cq(init_attr->srq_type) ?
+		   to_hr_cq(init_attr->ext.cq)->cqn : 0;
+}
+
+static int set_srq_param(struct hns_roce_srq *srq,
+			 struct ib_srq_init_attr *init_attr,
+			 struct ib_udata *udata)
+{
+	int ret;
+
+	ret = set_srq_basic_param(srq, init_attr, udata);
+	if (ret)
+		return ret;
+
+	set_srq_ext_param(srq, init_attr);
+
+	return 0;
+}
+
+static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+			 struct ib_udata *udata)
+{
+	struct hns_roce_ib_create_srq ucmd = {};
+	int ret;
+
+	if (udata) {
+		ret = ib_copy_from_udata(&ucmd, udata,
+					 min(udata->inlen, sizeof(ucmd)));
+		if (ret) {
+			ibdev_err(&hr_dev->ib_dev,
+				  "failed to copy SRQ udata, ret = %d.\n",
+				  ret);
+			return ret;
+		}
+	}
+
+	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
+	if (ret)
+		return ret;
+
+	ret = alloc_srq_wqe_buf(hr_dev, srq, udata, ucmd.buf_addr);
+	if (ret)
+		goto err_idx;
+
+	if (!udata) {
+		ret = alloc_srq_wrid(hr_dev, srq);
+		if (ret)
+			goto err_wqe_buf;
+	}
+
+	return 0;
+
+err_wqe_buf:
+	free_srq_wqe_buf(hr_dev, srq);
+err_idx:
+	free_srq_idx(hr_dev, srq);
+
+	return ret;
+}
+
+static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+	free_srq_wrid(srq);
+	free_srq_wqe_buf(hr_dev, srq);
+	free_srq_idx(hr_dev, srq);
+}
+
+int hns_roce_create_srq(struct ib_srq *ib_srq,
+			struct ib_srq_init_attr *init_attr,
+			struct ib_udata *udata)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
+	struct hns_roce_ib_create_srq_resp resp = {};
+	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
+	int ret;
+
+	mutex_init(&srq->mutex);
+	spin_lock_init(&srq->lock);
+
+	ret = set_srq_param(srq, init_attr, udata);
+	if (ret)
+		return ret;
+
+	ret = alloc_srq_buf(hr_dev, srq, udata);
+	if (ret)
+		return ret;
+
+	ret = alloc_srqc(hr_dev, srq);
+	if (ret)
+		goto err_srq_buf;
+
+	if (udata) {
+		resp.srqn = srq->srqn;
+		if (ib_copy_to_udata(udata, &resp,
+				     min(udata->outlen, sizeof(resp)))) {
+			ret = -EFAULT;
+			goto err_srqc;
+		}
+	}
+
+	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
+	srq->event = hns_roce_ib_srq_event;
+	atomic_set(&srq->refcount, 1);
+	init_completion(&srq->free);
+
+	return 0;
+
+err_srqc:
+	free_srqc(hr_dev, srq);
+err_srq_buf:
+	free_srq_buf(hr_dev, srq);
+
+	return ret;
+}
 
@@ -414,8 +451,6 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
 
 	free_srqc(hr_dev, srq);
-	free_srq_idx(hr_dev, srq);
-	free_srq_wrid(srq);
 	free_srq_buf(hr_dev, srq);
 
 	return 0;
 }