Commit 4f5731b1 authored by Chengchang Tang, committed by Leon Romanovsky

RDMA/hns: Refactor mtr_init_buf_cfg()

page_shift and page_cnt are only used in mtr_map_bufs(), and both
parameters can be calculated independently there.

Strip the computation of page_shift and page_cnt out of mtr_init_buf_cfg(),
reducing its number of parameters. This helps reduce the coupling between
mtr_init_buf_cfg() and mtr_map_bufs().
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20240113085935.2838701-3-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent a4ca3410
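
In short: mtr_init_buf_cfg() now only fills in hem_cfg and returns 0 or a
negative errno, while mtr_map_bufs() derives page_count and page_shift from
hem_cfg on its own. A minimal sketch of the resulting call flow in
hns_roce_mtr_create(), condensed from the hunks below (intermediate
allocation steps and error logging are elided):

	/* Sketch of hns_roce_mtr_create() after this patch; the code
	 * between the two calls (MTT/buffer allocation) is elided.
	 */
	ret = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
			       udata ? user_addr & ~PAGE_MASK : 0);
	if (ret)
		return ret;

	/* ... mtr_alloc_mtt() / mtr_alloc_bufs() ... */

	/* page_count and page_shift are now computed inside mtr_map_bufs()
	 * (via cal_mtr_pg_cnt() and hem_cfg), not passed in by the caller.
	 */
	ret = mtr_map_bufs(hr_dev, mtr);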
@@ -707,14 +707,37 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	return 0;
 }
 
-static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-			int page_count, unsigned int page_shift)
+static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr)
+{
+	struct hns_roce_buf_region *region;
+	int page_cnt = 0;
+	int i;
+
+	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+		region = &mtr->hem_cfg.region[i];
+		page_cnt += region->count;
+	}
+
+	return page_cnt;
+}
+
+static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
+	int page_count = cal_mtr_pg_cnt(mtr);
+	unsigned int page_shift;
 	dma_addr_t *pages;
 	int npage;
 	int ret;
 
+	/* When HEM buffer uses 0-level addressing, the page size is
+	 * equal to the whole buffer size, and we split the buffer into
+	 * small pages which is used to check whether the adjacent
+	 * units are in the continuous space and its size is fixed to
+	 * 4K based on hns ROCEE's requirement.
+	 */
+	page_shift = mtr->hem_cfg.is_direct ? HNS_HW_PAGE_SHIFT :
+					      mtr->hem_cfg.buf_pg_shift;
 	/* alloc a tmp array to store buffer's dma address */
 	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
 	if (!pages)
@@ -894,37 +917,30 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 
 static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
 			    struct hns_roce_buf_attr *attr,
-			    struct hns_roce_hem_cfg *cfg,
-			    unsigned int *buf_page_shift, u64 unalinged_size)
+			    struct hns_roce_hem_cfg *cfg, u64 unalinged_size)
 {
+	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_region *r;
 	u64 first_region_padding;
 	int page_cnt, region_cnt;
-	unsigned int page_shift;
+	size_t buf_pg_sz;
 	size_t buf_size;
 
 	/* If mtt is disabled, all pages must be within a continuous range */
 	cfg->is_direct = !mtr_has_mtt(attr);
 	buf_size = mtr_bufs_size(attr);
 	if (cfg->is_direct) {
-		/* When HEM buffer uses 0-level addressing, the page size is
-		 * equal to the whole buffer size, and we split the buffer into
-		 * small pages which is used to check whether the adjacent
-		 * units are in the continuous space and its size is fixed to
-		 * 4K based on hns ROCEE's requirement.
-		 */
-		page_shift = HNS_HW_PAGE_SHIFT;
-
-		/* The ROCEE requires the page size to be 4K * 2 ^ N. */
+		buf_pg_sz = HNS_HW_PAGE_SIZE;
 		cfg->buf_pg_count = 1;
+		/* The ROCEE requires the page size to be 4K * 2 ^ N. */
 		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
 			order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
 		first_region_padding = 0;
 	} else {
-		page_shift = attr->page_shift;
 		cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unalinged_size,
-						 1 << page_shift);
-		cfg->buf_pg_shift = page_shift;
+						 1 << attr->page_shift);
+		cfg->buf_pg_shift = attr->page_shift;
+		buf_pg_sz = 1 << cfg->buf_pg_shift;
 		first_region_padding = unalinged_size;
 	}
 
@@ -937,7 +953,7 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
 		r->offset = page_cnt;
 		buf_size = hr_hw_page_align(attr->region[region_cnt].size +
 					    first_region_padding);
-		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
+		r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);
 		first_region_padding = 0;
 		page_cnt += r->count;
 		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
@@ -945,9 +961,13 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
 	}
 
 	cfg->region_count = region_cnt;
-	*buf_page_shift = page_shift;
+	if (cfg->region_count < 1 || cfg->buf_pg_shift < HNS_HW_PAGE_SHIFT) {
+		ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
+			  cfg->region_count, cfg->buf_pg_shift);
+		return -EINVAL;
+	}
 
-	return page_cnt;
+	return 0;
 }
 
 static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
@@ -1035,18 +1055,12 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 			  unsigned long user_addr)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
-	unsigned int buf_page_shift = 0;
-	int buf_page_cnt;
 	int ret;
 
-	buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
-					&buf_page_shift,
-					udata ? user_addr & ~PAGE_MASK : 0);
-	if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
-		ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
-			  buf_page_cnt, buf_page_shift);
-		return -EINVAL;
-	}
+	ret = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
+			       udata ? user_addr & ~PAGE_MASK : 0);
+	if (ret)
+		return ret;
 
 	ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
 	if (ret) {
@@ -1070,7 +1084,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	}
 
 	/* Write buffer's dma address to MTT */
-	ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
+	ret = mtr_map_bufs(hr_dev, mtr);
 	if (ret)
 		ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
 	else