Commit bb644f61 authored by Tomer Tayar's avatar Tomer Tayar Committed by Oded Gabbay

accel/habanalabs: fix SG table creation for dma-buf mapping

In some cases the calculated number of required entries for the dma-buf
SG table is wrong. For example, if the page size is larger than both the
dma max segment size of the importer device and the exported size, or if
the exported size is part of a phys_pg_pack that is composed of several
pages.
In these cases, redundant entries will be added to the SG table.

Modify the method by which the number of entries is calculated, and the
way they are prepared.
Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
parent ba24b5ec
...@@ -1535,21 +1535,17 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 ...@@ -1535,21 +1535,17 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
u64 page_size, u64 exported_size, u64 page_size, u64 exported_size,
struct device *dev, enum dma_data_direction dir) struct device *dev, enum dma_data_direction dir)
{ {
u64 chunk_size, bar_address, dma_max_seg_size, cur_size_to_export, cur_npages; u64 dma_max_seg_size, curr_page, size, chunk_size, left_size_to_export, left_size_in_page,
struct asic_fixed_properties *prop; left_size_in_dma_seg, device_address, bar_address;
int rc, i, j, nents, cur_page; struct asic_fixed_properties *prop = &hdev->asic_prop;
struct scatterlist *sg; struct scatterlist *sg;
unsigned int nents, i;
struct sg_table *sgt; struct sg_table *sgt;
bool next_sg_entry;
int rc;
prop = &hdev->asic_prop; /* Align max segment size to PAGE_SIZE to fit the minimal IOMMU mapping granularity */
dma_max_seg_size = ALIGN_DOWN(dma_get_max_seg_size(dev), PAGE_SIZE);
dma_max_seg_size = dma_get_max_seg_size(dev);
/* We would like to align the max segment size to PAGE_SIZE, so the
* SGL will contain aligned addresses that can be easily mapped to
* an MMU
*/
dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
if (dma_max_seg_size < PAGE_SIZE) { if (dma_max_seg_size < PAGE_SIZE) {
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n", "dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
...@@ -1561,120 +1557,133 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 ...@@ -1561,120 +1557,133 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
if (!sgt) if (!sgt)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
cur_size_to_export = exported_size; /* Calculate the required number of entries for the SG table */
curr_page = 0;
nents = 1;
left_size_to_export = exported_size;
left_size_in_page = page_size;
left_size_in_dma_seg = dma_max_seg_size;
next_sg_entry = false;
while (true) {
size = min3(left_size_to_export, left_size_in_page, left_size_in_dma_seg);
left_size_to_export -= size;
left_size_in_page -= size;
left_size_in_dma_seg -= size;
if (!left_size_to_export)
break;
/* If the size of each page is larger than the dma max segment size, if (!left_size_in_page) {
* then we can't combine pages and the number of entries in the SGL /* left_size_to_export is not zero so there must be another page */
* will just be the if (pages[curr_page] + page_size != pages[curr_page + 1])
* <number of pages> * <chunks of max segment size in each page> next_sg_entry = true;
*/
if (page_size > dma_max_seg_size) { ++curr_page;
/* we should limit number of pages according to the exported size */ left_size_in_page = page_size;
cur_npages = DIV_ROUND_UP_SECTOR_T(cur_size_to_export, page_size); }
nents = cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
} else {
cur_npages = npages;
/* Get number of non-contiguous chunks */
for (i = 1, nents = 1, chunk_size = page_size ; i < cur_npages ; i++) {
if (pages[i - 1] + page_size != pages[i] ||
chunk_size + page_size > dma_max_seg_size) {
nents++;
chunk_size = page_size;
continue;
}
chunk_size += page_size; if (!left_size_in_dma_seg) {
next_sg_entry = true;
left_size_in_dma_seg = dma_max_seg_size;
}
if (next_sg_entry) {
++nents;
next_sg_entry = false;
} }
} }
rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO); rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
if (rc) if (rc)
goto error_free; goto err_free_sgt;
cur_page = 0;
if (page_size > dma_max_seg_size) {
u64 size_left, cur_device_address = 0;
size_left = page_size; /* Prepare the SG table entries */
curr_page = 0;
device_address = pages[curr_page];
left_size_to_export = exported_size;
left_size_in_page = page_size;
left_size_in_dma_seg = dma_max_seg_size;
next_sg_entry = false;
/* Need to split each page into the number of chunks of for_each_sgtable_dma_sg(sgt, sg, i) {
* dma_max_seg_size bar_address = hdev->dram_pci_bar_start + (device_address - prop->dram_base_address);
*/ chunk_size = 0;
for_each_sgtable_dma_sg(sgt, sg, i) {
if (size_left == page_size) for ( ; curr_page < npages ; ++curr_page) {
cur_device_address = size = min3(left_size_to_export, left_size_in_page, left_size_in_dma_seg);
pages[cur_page] - prop->dram_base_address; chunk_size += size;
else left_size_to_export -= size;
cur_device_address += dma_max_seg_size; left_size_in_page -= size;
left_size_in_dma_seg -= size;
/* make sure not to export over exported size */
chunk_size = min3(size_left, dma_max_seg_size, cur_size_to_export); if (!left_size_to_export)
break;
bar_address = hdev->dram_pci_bar_start + cur_device_address;
if (!left_size_in_page) {
rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir); /* left_size_to_export is not zero so there must be another page */
if (rc) if (pages[curr_page] + page_size != pages[curr_page + 1]) {
goto error_unmap; device_address = pages[curr_page + 1];
next_sg_entry = true;
}
left_size_in_page = page_size;
}
cur_size_to_export -= chunk_size; if (!left_size_in_dma_seg) {
/*
* Skip setting a new device address if already moving to a page
* which is not contiguous with the current page.
*/
if (!next_sg_entry) {
device_address += chunk_size;
next_sg_entry = true;
}
left_size_in_dma_seg = dma_max_seg_size;
}
if (size_left > dma_max_seg_size) { if (next_sg_entry) {
size_left -= dma_max_seg_size; next_sg_entry = false;
} else { break;
cur_page++;
size_left = page_size;
} }
} }
} else {
/* Merge pages and put them into the scatterlist */
for_each_sgtable_dma_sg(sgt, sg, i) {
chunk_size = page_size;
for (j = cur_page + 1 ; j < cur_npages ; j++) {
if (pages[j - 1] + page_size != pages[j] ||
chunk_size + page_size > dma_max_seg_size)
break;
chunk_size += page_size;
}
bar_address = hdev->dram_pci_bar_start +
(pages[cur_page] - prop->dram_base_address);
/* make sure not to export over exported size */ rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
chunk_size = min(chunk_size, cur_size_to_export); if (rc)
rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir); goto err_unmap;
if (rc) }
goto error_unmap;
cur_size_to_export -= chunk_size; /* There should be nothing left to export exactly after looping over all SG elements */
cur_page = j; if (left_size_to_export) {
} dev_err(hdev->dev,
"left size to export %#llx after initializing %u SG elements\n",
left_size_to_export, sgt->nents);
rc = -ENOMEM;
goto err_unmap;
} }
/* Because we are not going to include a CPU list we want to have some /*
* chance that other users will detect this by setting the orig_nents * Because we are not going to include a CPU list, we want to have some chance that other
* to 0 and using only nents (length of DMA list) when going over the * users will detect this when going over SG table, by setting the orig_nents to 0 and using
* sgl * only nents (length of DMA list).
*/ */
sgt->orig_nents = 0; sgt->orig_nents = 0;
return sgt; return sgt;
error_unmap: err_unmap:
for_each_sgtable_dma_sg(sgt, sg, i) { for_each_sgtable_dma_sg(sgt, sg, i) {
if (!sg_dma_len(sg)) if (!sg_dma_len(sg))
continue; continue;
dma_unmap_resource(dev, sg_dma_address(sg), dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg), dir,
sg_dma_len(sg), dir,
DMA_ATTR_SKIP_CPU_SYNC); DMA_ATTR_SKIP_CPU_SYNC);
} }
sg_free_table(sgt); sg_free_table(sgt);
error_free: err_free_sgt:
kfree(sgt); kfree(sgt);
return ERR_PTR(rc); return ERR_PTR(rc);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment