Commit 378efe79 authored by Wei Hu (Xavier), committed by Jason Gunthorpe

RDMA/hns: Get rid of page operation after dma_alloc_coherent

In general, dma_alloc_coherent() returns a CPU virtual address and
a DMA address, and we have no guarantee that the underlying memory
even has an associated struct page at all.

This patch gets rid of the page operations after dma_alloc_coherent(),
and instead records the VA returned by dma_alloc_coherent() in the hem
structure of the hns RoCE driver.
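
For illustration, a minimal sketch of the safe pattern, assuming
hypothetical names (demo_buf, demo_alloc, demo_free) rather than the
driver's real structures:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Keep the kernel VA that dma_alloc_coherent() hands back and use it
 * directly; deriving it later via sg_page() + lowmem_page_address()
 * assumes the memory is backed by a struct page, which coherent
 * allocations do not guarantee.
 */
struct demo_buf {
	void *cpu_va;		/* VA returned by dma_alloc_coherent() */
	dma_addr_t dma;		/* matching device/bus address */
	size_t size;
};

static int demo_alloc(struct device *dev, struct demo_buf *b, size_t size)
{
	b->cpu_va = dma_alloc_coherent(dev, size, &b->dma, GFP_KERNEL);
	if (!b->cpu_va)
		return -ENOMEM;
	b->size = size;
	return 0;
}

static void demo_free(struct device *dev, struct demo_buf *b)
{
	/* Free with the recorded VA, never one reconstructed from pages. */
	dma_free_coherent(dev, b->size, b->cpu_va, b->dma);
}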

Fixes: 9a443537 ("IB/hns: Add driver files for hns RoCE driver")
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Xiping Zhang (Francis) <zhangxiping3@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent b1c15835
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
 			chunk->npages = 0;
 			chunk->nsg = 0;
+			memset(chunk->buf, 0, sizeof(chunk->buf));
 			list_add_tail(&chunk->list, &hem->chunk_list);
 		}
@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 		if (!buf)
 			goto fail;
 
-		sg_set_buf(mem, buf, PAGE_SIZE << order);
-		WARN_ON(mem->offset);
+		chunk->buf[chunk->npages] = buf;
 		sg_dma_len(mem) = PAGE_SIZE << order;
 
 		++chunk->npages;
@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
 	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i)
 			dma_free_coherent(hr_dev->dev,
-					  chunk->mem[i].length,
-					  lowmem_page_address(sg_page(&chunk->mem[i])),
+					  sg_dma_len(&chunk->mem[i]),
+					  chunk->buf[i],
 					  sg_dma_address(&chunk->mem[i]));
 		kfree(chunk);
 	}
@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 	struct hns_roce_hem_chunk *chunk;
 	struct hns_roce_hem_mhop mhop;
 	struct hns_roce_hem *hem;
-	struct page *page = NULL;
+	void *addr = NULL;
 	unsigned long mhop_obj = obj;
 	unsigned long obj_per_chunk;
 	unsigned long idx_offset;
 	int offset, dma_offset;
+	int length;
 	int i, j;
 	u32 hem_idx = 0;
@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 	list_for_each_entry(chunk, &hem->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i) {
+			length = sg_dma_len(&chunk->mem[i]);
 			if (dma_handle && dma_offset >= 0) {
-				if (sg_dma_len(&chunk->mem[i]) >
-				    (u32)dma_offset)
+				if (length > (u32)dma_offset)
 					*dma_handle = sg_dma_address(
 						&chunk->mem[i]) + dma_offset;
-				dma_offset -= sg_dma_len(&chunk->mem[i]);
+				dma_offset -= length;
 			}
 
-			if (chunk->mem[i].length > (u32)offset) {
-				page = sg_page(&chunk->mem[i]);
+			if (length > (u32)offset) {
+				addr = chunk->buf[i] + offset;
 				goto out;
 			}
-			offset -= chunk->mem[i].length;
+			offset -= length;
 		}
 	}
 
 out:
 	mutex_unlock(&table->mutex);
-	return page ? lowmem_page_address(page) + offset : NULL;
+	return addr;
 }
 EXPORT_SYMBOL_GPL(hns_roce_table_find);
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
 	int npages;
 	int nsg;
 	struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
+	void *buf[HNS_ROCE_HEM_CHUNK_LEN];
 };
 
 struct hns_roce_hem {
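
For context, with the buf[] array in place the lookup in
hns_roce_table_find() reduces to offsetting into the recorded VA. A
condensed, hypothetical simplification (single chunk walk only;
locking, multi-hop handling, and the dma_handle path omitted):

#include <linux/scatterlist.h>

/* Condensed sketch of the fixed lookup over one chunk, using the
 * struct hns_roce_hem_chunk from the patch: the VA comes straight
 * from buf[i] rather than sg_page()/lowmem_page_address().
 */
static void *demo_find_in_chunk(struct hns_roce_hem_chunk *chunk, u32 offset)
{
	u32 length;
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		length = sg_dma_len(&chunk->mem[i]);
		if (length > offset)
			return chunk->buf[i] + offset;	/* recorded VA + offset */
		offset -= length;
	}
	return NULL;
}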