Commit 2f49de21 authored by Xi Wang, committed by Jason Gunthorpe

RDMA/hns: Optimize mhop get flow for multi-hop addressing

Splits hns_roce_table_mhop_get() into 4 sub-functions to make the code flow
clearer.

Link: https://lore.kernel.org/r/1584417324-2255-2-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent b1d56fdc
@@ -39,6 +39,16 @@
 #define DMA_ADDR_T_SHIFT	12
 #define BT_BA_SHIFT		32
 
+#define HEM_INDEX_BUF		BIT(0)
+#define HEM_INDEX_L0		BIT(1)
+#define HEM_INDEX_L1		BIT(2)
+struct hns_roce_hem_index {
+	u64 buf;
+	u64 l0;
+	u64 l1;
+	u32 inited; /* indicate which index is available */
+};
+
 bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
 {
 	int hop_num = 0;
@@ -434,178 +444,235 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
-				   struct hns_roce_hem_table *table,
-				   unsigned long obj)
-{
-	struct device *dev = hr_dev->dev;
-	struct hns_roce_hem_mhop mhop;
-	struct hns_roce_hem_iter iter;
-	u32 buf_chunk_size;
-	u32 bt_chunk_size;
-	u32 chunk_ba_num;
-	u32 hop_num;
-	u32 size;
-	u32 bt_num;
-	u64 hem_idx;
-	u64 bt_l1_idx = 0;
-	u64 bt_l0_idx = 0;
-	u64 bt_ba;
-	unsigned long mhop_obj = obj;
-	int bt_l1_allocated = 0;
-	int bt_l0_allocated = 0;
-	int step_idx;
-	int ret;
-
-	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
-	if (ret)
-		return ret;
-
-	buf_chunk_size = mhop.buf_chunk_size;
-	bt_chunk_size = mhop.bt_chunk_size;
-	hop_num = mhop.hop_num;
-	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
-
-	bt_num = hns_roce_get_bt_num(table->type, hop_num);
-	switch (bt_num) {
-	case 3:
-		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
-			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
-		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
-		bt_l0_idx = mhop.l0_idx;
-		break;
-	case 2:
-		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
-		bt_l0_idx = mhop.l0_idx;
-		break;
-	case 1:
-		hem_idx = mhop.l0_idx;
-		break;
-	default:
-		dev_err(dev, "Table %d not support hop_num = %d!\n",
-			table->type, hop_num);
-		return -EINVAL;
-	}
-
-	if (unlikely(hem_idx >= table->num_hem)) {
-		dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n",
-			table->type, hem_idx, table->num_hem);
-		return -EINVAL;
-	}
-
-	mutex_lock(&table->mutex);
-
-	if (table->hem[hem_idx]) {
-		++table->hem[hem_idx]->refcount;
-		goto out;
-	}
-
-	/* alloc L1 BA's chunk */
-	if ((check_whether_bt_num_3(table->type, hop_num) ||
-	     check_whether_bt_num_2(table->type, hop_num)) &&
-	     !table->bt_l0[bt_l0_idx]) {
-		table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
-					    &(table->bt_l0_dma_addr[bt_l0_idx]),
-					    GFP_KERNEL);
-		if (!table->bt_l0[bt_l0_idx]) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		bt_l0_allocated = 1;
-
-		/* set base address to hardware */
-		if (table->type < HEM_TYPE_MTT) {
-			step_idx = 0;
-			if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
-				ret = -ENODEV;
-				dev_err(dev, "set HEM base address to HW failed!\n");
-				goto err_dma_alloc_l1;
-			}
-		}
-	}
-
-	/* alloc L2 BA's chunk */
-	if (check_whether_bt_num_3(table->type, hop_num) &&
-	    !table->bt_l1[bt_l1_idx]) {
-		table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
-					    &(table->bt_l1_dma_addr[bt_l1_idx]),
-					    GFP_KERNEL);
-		if (!table->bt_l1[bt_l1_idx]) {
-			ret = -ENOMEM;
-			goto err_dma_alloc_l1;
-		}
-		bt_l1_allocated = 1;
-		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
-					       table->bt_l1_dma_addr[bt_l1_idx];
-
-		/* set base address to hardware */
-		step_idx = 1;
-		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
-			ret = -ENODEV;
-			dev_err(dev, "set HEM base address to HW failed!\n");
-			goto err_alloc_hem_buf;
-		}
-	}
-
-	/*
-	 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
-	 * alloc bt space chunk for MTT/CQE.
-	 */
-	size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
-	table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
-						 size >> PAGE_SHIFT,
-						 size,
-						 (table->lowmem ? GFP_KERNEL :
-						  GFP_HIGHUSER) | __GFP_NOWARN);
-	if (!table->hem[hem_idx]) {
-		ret = -ENOMEM;
-		goto err_alloc_hem_buf;
-	}
-
-	hns_roce_hem_first(table->hem[hem_idx], &iter);
-	bt_ba = hns_roce_hem_addr(&iter);
-
-	if (table->type < HEM_TYPE_MTT) {
-		if (hop_num == 2) {
-			*(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
-			step_idx = 2;
-		} else if (hop_num == 1) {
-			*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
-			step_idx = 1;
-		} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
-			step_idx = 0;
-		} else {
-			ret = -EINVAL;
-			goto err_dma_alloc_l1;
-		}
-
-		/* set HEM base address to hardware */
-		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
-			ret = -ENODEV;
-			dev_err(dev, "set HEM base address to HW failed!\n");
-			goto err_alloc_hem_buf;
-		}
-	} else if (hop_num == 2) {
-		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
-	}
-
-	++table->hem[hem_idx]->refcount;
-	goto out;
-
-err_alloc_hem_buf:
-	if (bt_l1_allocated) {
-		dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
-				  table->bt_l1_dma_addr[bt_l1_idx]);
-		table->bt_l1[bt_l1_idx] = NULL;
-	}
-
-err_dma_alloc_l1:
-	if (bt_l0_allocated) {
-		dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
-				  table->bt_l0_dma_addr[bt_l0_idx]);
-		table->bt_l0[bt_l0_idx] = NULL;
-	}
-
+static int calc_hem_config(struct hns_roce_dev *hr_dev,
+			   struct hns_roce_hem_table *table, unsigned long obj,
+			   struct hns_roce_hem_mhop *mhop,
+			   struct hns_roce_hem_index *index)
+{
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	unsigned long mhop_obj = obj;
+	u32 l0_idx, l1_idx, l2_idx;
+	u32 chunk_ba_num;
+	u32 bt_num;
+	int ret;
+
+	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
+	if (ret)
+		return ret;
+
+	l0_idx = mhop->l0_idx;
+	l1_idx = mhop->l1_idx;
+	l2_idx = mhop->l2_idx;
+	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+	switch (bt_num) {
+	case 3:
+		index->l1 = l0_idx * chunk_ba_num + l1_idx;
+		index->l0 = l0_idx;
+		index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
+			     l1_idx * chunk_ba_num + l2_idx;
+		break;
+	case 2:
+		index->l0 = l0_idx;
+		index->buf = l0_idx * chunk_ba_num + l1_idx;
+		break;
+	case 1:
+		index->buf = l0_idx;
+		break;
+	default:
+		ibdev_err(ibdev, "Table %d not support mhop.hop_num = %d!\n",
+			  table->type, mhop->hop_num);
+		return -EINVAL;
+	}
+
+	if (unlikely(index->buf >= table->num_hem)) {
+		ibdev_err(ibdev, "Table %d exceed hem limt idx %llu,max %lu!\n",
+			  table->type, index->buf, table->num_hem);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void free_mhop_hem(struct hns_roce_dev *hr_dev,
+			  struct hns_roce_hem_table *table,
+			  struct hns_roce_hem_mhop *mhop,
+			  struct hns_roce_hem_index *index)
+{
+	u32 bt_size = mhop->bt_chunk_size;
+	struct device *dev = hr_dev->dev;
+
+	if (index->inited & HEM_INDEX_BUF) {
+		hns_roce_free_hem(hr_dev, table->hem[index->buf]);
+		table->hem[index->buf] = NULL;
+	}
+
+	if (index->inited & HEM_INDEX_L1) {
+		dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
+				  table->bt_l1_dma_addr[index->l1]);
+		table->bt_l1[index->l1] = NULL;
+	}
+
+	if (index->inited & HEM_INDEX_L0) {
+		dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
+				  table->bt_l0_dma_addr[index->l0]);
+		table->bt_l0[index->l0] = NULL;
+	}
+}
+
+static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
+			  struct hns_roce_hem_table *table,
+			  struct hns_roce_hem_mhop *mhop,
+			  struct hns_roce_hem_index *index)
+{
+	u32 bt_size = mhop->bt_chunk_size;
+	struct device *dev = hr_dev->dev;
+	struct hns_roce_hem_iter iter;
+	gfp_t flag;
+	u64 bt_ba;
+	u32 size;
+	int ret;
+
+	/* alloc L1 BA's chunk */
+	if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
+	     check_whether_bt_num_2(table->type, mhop->hop_num)) &&
+	     !table->bt_l0[index->l0]) {
+		table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
+					    &table->bt_l0_dma_addr[index->l0],
+					    GFP_KERNEL);
+		if (!table->bt_l0[index->l0]) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		index->inited |= HEM_INDEX_L0;
+	}
+
+	/* alloc L2 BA's chunk */
+	if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
+	    !table->bt_l1[index->l1]) {
+		table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
+					    &table->bt_l1_dma_addr[index->l1],
+					    GFP_KERNEL);
+		if (!table->bt_l1[index->l1]) {
+			ret = -ENOMEM;
+			goto err_alloc_hem;
+		}
+		index->inited |= HEM_INDEX_L1;
+		*(table->bt_l0[index->l0] + mhop->l1_idx) =
+					       table->bt_l1_dma_addr[index->l1];
+	}
+
+	/*
+	 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+	 * alloc bt space chunk for MTT/CQE.
+	 */
+	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
+	flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
+	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
+						    size, flag);
+	if (!table->hem[index->buf]) {
+		ret = -ENOMEM;
+		goto err_alloc_hem;
+	}
+
+	index->inited |= HEM_INDEX_BUF;
+	hns_roce_hem_first(table->hem[index->buf], &iter);
+	bt_ba = hns_roce_hem_addr(&iter);
+	if (table->type < HEM_TYPE_MTT) {
+		if (mhop->hop_num == 2)
+			*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
+		else if (mhop->hop_num == 1)
+			*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+	} else if (mhop->hop_num == 2) {
+		*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+	}
+
+	return 0;
+err_alloc_hem:
+	free_mhop_hem(hr_dev, table, mhop, index);
+out:
+	return ret;
+}
+
+static int set_mhop_hem(struct hns_roce_dev *hr_dev,
+			struct hns_roce_hem_table *table, unsigned long obj,
+			struct hns_roce_hem_mhop *mhop,
+			struct hns_roce_hem_index *index)
+{
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	int step_idx;
+	int ret;
+
+	if (index->inited & HEM_INDEX_L0) {
+		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
+		if (ret) {
+			ibdev_err(ibdev, "set HEM step 0 failed!\n");
+			goto out;
+		}
+	}
+
+	if (index->inited & HEM_INDEX_L1) {
+		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
+		if (ret) {
+			ibdev_err(ibdev, "set HEM step 1 failed!\n");
+			goto out;
+		}
+	}
+
+	if (index->inited & HEM_INDEX_BUF) {
+		if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
+			step_idx = 0;
+		else
+			step_idx = mhop->hop_num;
+		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+		if (ret)
+			ibdev_err(ibdev, "set HEM step last failed!\n");
+	}
+out:
+	return ret;
+}
+
+static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+				   struct hns_roce_hem_table *table,
+				   unsigned long obj)
+{
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	struct hns_roce_hem_index index = {};
+	struct hns_roce_hem_mhop mhop = {};
+	int ret;
+
+	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+	if (ret) {
+		ibdev_err(ibdev, "calc hem config failed!\n");
+		return ret;
+	}
+
+	mutex_lock(&table->mutex);
+	if (table->hem[index.buf]) {
+		++table->hem[index.buf]->refcount;
+		goto out;
+	}
+
+	ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
+	if (ret) {
+		ibdev_err(ibdev, "alloc mhop hem failed!\n");
+		goto out;
+	}
+
+	/* set HEM base address to hardware */
+	if (table->type < HEM_TYPE_MTT) {
+		ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
+		if (ret) {
+			ibdev_err(ibdev, "set HEM address to HW failed!\n");
+			goto err_alloc;
+		}
+	}
+
+	++table->hem[index.buf]->refcount;
+	goto out;
+
+err_alloc:
+	free_mhop_hem(hr_dev, table, &mhop, &index);
 out:
 	mutex_unlock(&table->mutex);
 	return ret;
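The heart of the refactor is the bookkeeping around struct hns_roce_hem_index: alloc_mhop_hem() records a HEM_INDEX_* bit in index->inited for each level it actually allocates, and free_mhop_hem() tears down only the recorded levels, so every caller can share one unwind routine instead of the old chain of error labels. Below is a minimal, self-contained C sketch of that record-then-unwind pattern; the demo_* names, sizes, and plain malloc()/free() calls are illustrative stand-ins, not the driver's DMA allocators.

/* Standalone sketch of the "record what was allocated, unwind only that"
 * pattern used by the patch. Hypothetical names; not driver code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_INDEX_BUF (1u << 0)
#define DEMO_INDEX_L0  (1u << 1)
#define DEMO_INDEX_L1  (1u << 2)

struct demo_index {
	void *l0;
	void *l1;
	void *buf;
	uint32_t inited; /* which levels were actually allocated */
};

static void free_levels(struct demo_index *idx)
{
	/* Free only what was recorded, most-derived level first. */
	if (idx->inited & DEMO_INDEX_BUF) {
		free(idx->buf);
		idx->buf = NULL;
	}
	if (idx->inited & DEMO_INDEX_L1) {
		free(idx->l1);
		idx->l1 = NULL;
	}
	if (idx->inited & DEMO_INDEX_L0) {
		free(idx->l0);
		idx->l0 = NULL;
	}
	idx->inited = 0;
}

static int alloc_levels(struct demo_index *idx, int hops)
{
	/* Each successful step sets its bit before moving on. */
	if (hops >= 1) {
		idx->l0 = malloc(64);
		if (!idx->l0)
			goto err;
		idx->inited |= DEMO_INDEX_L0;
	}
	if (hops >= 2) {
		idx->l1 = malloc(64);
		if (!idx->l1)
			goto err;
		idx->inited |= DEMO_INDEX_L1;
	}
	idx->buf = malloc(4096);
	if (!idx->buf)
		goto err;
	idx->inited |= DEMO_INDEX_BUF;
	return 0;
err:
	free_levels(idx); /* single unwind path for every failure point */
	return -1;
}

int main(void)
{
	struct demo_index idx = {0};

	if (alloc_levels(&idx, 2) == 0) {
		printf("allocated mask=0x%x\n", (unsigned)idx.inited);
		free_levels(&idx);
	}
	return 0;
}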