Commit 9766edc3 authored by Shaobo Xu, committed by Doug Ledford

RDMA/hns: Split CQE from MTT in hip08

In hip08, the SQWQE/SGE/RQWQE and the CQE use different hop numbers and
page sizes, so the base address tables of the SQWQE/SGE/RQWQE and of the
CQE need to be managed separately.

This patch splits the CQE base address table from the MTT (SQWQE/SGE/RQWQE)
table; a standalone sketch of the resulting WQE/CQE dispatch follows the
sign-off block below.
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 6a93c77a
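For readers skimming the hunks below, the core of the change is a two-way dispatch: struct hns_roce_mtt now carries an enum hns_roce_mtt_type tag, and the allocation (hns_roce_alloc_mtt_range), write (hns_roce_write_mtt_chunk) and cleanup (hns_roce_mtt_cleanup) paths use that tag to pick either the WQE pair (mtt_buddy/mtt_table) or the new CQE pair (mtt_cqe_buddy/mtt_cqe_table). The standalone C sketch below mirrors that dispatch outside the kernel; the names mtt_tables, hem_table_sketch and pick_table are illustrative only and do not exist in the driver.

/*
 * Minimal userspace sketch of the WQE/CQE dispatch introduced by this
 * patch.  The types and helpers here (hem_table_sketch, mtt_tables,
 * pick_table) are illustrative stand-ins; the real driver tags
 * struct hns_roce_mtt with enum hns_roce_mtt_type and selects between
 * mr_table.mtt_table and mr_table.mtt_cqe_table.
 */
#include <stdio.h>

enum mtt_type { MTT_TYPE_WQE = 0, MTT_TYPE_CQE };

struct hem_table_sketch {
        const char *name;               /* stands in for a real HEM table */
};

struct mtt_tables {
        struct hem_table_sketch wqe_table;      /* SQWQE/SGE/RQWQE entries */
        struct hem_table_sketch cqe_table;      /* CQE entries (hip08 only) */
};

/* Pick the base address table that matches the MTT's type tag. */
static struct hem_table_sketch *pick_table(struct mtt_tables *t,
                                           enum mtt_type type)
{
        return (type == MTT_TYPE_WQE) ? &t->wqe_table : &t->cqe_table;
}

int main(void)
{
        struct mtt_tables tables = {
                .wqe_table = { .name = "mtt_table" },
                .cqe_table = { .name = "mtt_cqe_table" },
        };

        printf("WQE MTT entries live in %s\n",
               pick_table(&tables, MTT_TYPE_WQE)->name);
        printf("CQE MTT entries live in %s\n",
               pick_table(&tables, MTT_TYPE_CQE)->name);
        return 0;
}

In the patch itself, the CQE-side resources are only initialized when hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE) indicates that CQE base addresses use multi-hop addressing, so only hip08 pays for the extra buddy allocator and HEM table.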
@@ -96,7 +96,11 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 	cq_table = &hr_dev->cq_table;
 
 	/* Get the physical address of cq buf */
-	mtt_table = &hr_dev->mr_table.mtt_table;
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
+	else
+		mtt_table = &hr_dev->mr_table.mtt_table;
+
 	mtts = hns_roce_table_find(hr_dev, mtt_table,
 				   hr_mtt->first_seg, &dma_handle);
 	if (!mtts) {
@@ -221,6 +225,10 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+	else
+		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
 	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
 				(*umem)->page_shift, &buf->hr_mtt);
 	if (ret)
@@ -250,6 +258,11 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 	if (ret)
 		goto out;
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+	else
+		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
 	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
 				buf->hr_buf.page_shift, &buf->hr_mtt);
 	if (ret)
...
@@ -170,6 +170,11 @@ enum {
 	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE	= 0x07,
 };
 
+enum hns_roce_mtt_type {
+	MTT_TYPE_WQE = 0,
+	MTT_TYPE_CQE,
+};
+
 #define HNS_ROCE_CMD_SUCCESS			1
 
 #define HNS_ROCE_PORT_DOWN			0
@@ -241,9 +246,10 @@ struct hns_roce_hem_table {
 };
 
 struct hns_roce_mtt {
 	unsigned long		first_seg;
 	int			order;
 	int			page_shift;
+	enum hns_roce_mtt_type	mtt_type;
 };
 
 /* Only support 4K page size for mr register */
@@ -268,6 +274,8 @@ struct hns_roce_mr_table {
 	struct hns_roce_buddy		mtt_buddy;
 	struct hns_roce_hem_table	mtt_table;
 	struct hns_roce_hem_table	mtpt_table;
+	struct hns_roce_buddy		mtt_cqe_buddy;
+	struct hns_roce_hem_table	mtt_cqe_table;
 };
 
 struct hns_roce_wq {
...
@@ -1041,4 +1041,7 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_cqe_table);
 }
@@ -546,6 +546,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 		return ret;
 	}
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_init_hem_table(hr_dev,
+				&hr_dev->mr_table.mtt_cqe_table,
+				HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
+				hr_dev->caps.num_cqe_segs, 1);
+		if (ret) {
+			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+			goto err_unmap_cqe;
+		}
+	}
+
 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
 				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
 				      hr_dev->caps.num_mtpts, 1);
@@ -593,6 +604,12 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 
 err_unmap_mtt:
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_cleanup_hem_table(hr_dev,
+					   &hr_dev->mr_table.mtt_cqe_table);
+
+err_unmap_cqe:
+	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
 
 	return ret;
 }
...
@@ -177,18 +177,28 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
 }
 
 static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
-				    unsigned long *seg)
+				    unsigned long *seg, u32 mtt_type)
 {
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	struct hns_roce_hem_table *table;
+	struct hns_roce_buddy *buddy;
+	int ret;
 
-	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+	if (mtt_type == MTT_TYPE_WQE) {
+		buddy = &mr_table->mtt_buddy;
+		table = &mr_table->mtt_table;
+	} else {
+		buddy = &mr_table->mtt_cqe_buddy;
+		table = &mr_table->mtt_cqe_table;
+	}
+
+	ret = hns_roce_buddy_alloc(buddy, order, seg);
 	if (ret == -1)
 		return -1;
 
-	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+	if (hns_roce_table_get_range(hr_dev, table, *seg,
 				     *seg + (1 << order) - 1)) {
-		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+		hns_roce_buddy_free(buddy, *seg, order);
 		return -1;
 	}
@@ -198,7 +208,7 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
 int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
 		      struct hns_roce_mtt *mtt)
 {
-	int ret = 0;
+	int ret;
 	int i;
 
 	/* Page num is zero, correspond to DMA memory register */
@@ -217,7 +227,8 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
 	++mtt->order;
 
 	/* Allocate MTT entry */
-	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
+				       mtt->mtt_type);
 	if (ret == -1)
 		return -ENOMEM;
@@ -231,9 +242,19 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
 	if (mtt->order < 0)
 		return;
 
-	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
-				 mtt->first_seg + (1 << mtt->order) - 1);
+	if (mtt->mtt_type == MTT_TYPE_WQE) {
+		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
+					 mtt->first_seg,
+					 mtt->first_seg + (1 << mtt->order) - 1);
+	} else {
+		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
+					 mtt->first_seg,
+					 mtt->first_seg + (1 << mtt->order) - 1);
+	}
 }
 EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
@@ -362,7 +383,11 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
 	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
 		return -EINVAL;
 
-	table = &hr_dev->mr_table.mtt_table;
+	if (mtt->mtt_type == MTT_TYPE_WQE)
+		table = &hr_dev->mr_table.mtt_table;
+	else
+		table = &hr_dev->mr_table.mtt_cqe_table;
+
 	mtts = hns_roce_table_find(hr_dev, table,
 				mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
 				&dma_handle);
@@ -409,9 +434,9 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
 int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
 			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
 {
-	u32 i = 0;
-	int ret = 0;
-	u64 *page_list = NULL;
+	u64 *page_list;
+	int ret;
+	u32 i;
 
 	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
 	if (!page_list)
@@ -434,7 +459,7 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
 int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	int ret;
 
 	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
 				   hr_dev->caps.num_mtpts,
@@ -448,8 +473,17 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
 	if (ret)
 		goto err_buddy;
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
+					  ilog2(hr_dev->caps.num_cqe_segs));
+		if (ret)
+			goto err_buddy_cqe;
+	}
 	return 0;
 
+err_buddy_cqe:
+	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+
 err_buddy:
 	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
 	return ret;
@@ -460,6 +494,8 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
 
 	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
 	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
 }
...
@@ -440,6 +440,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_out;
 		}
 
+		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
 		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
 					hr_qp->umem->page_shift, &hr_qp->mtt);
 		if (ret) {
@@ -490,6 +491,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			goto err_out;
 		}
 
+		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
 		/* Write MTT */
 		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
 					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
...