Commit 096339ab authored by Gaurav Batra, committed by Michael Ellerman

powerpc/iommu: DMA address offset is incorrectly calculated with 2MB TCEs

When the DMA window is backed by 2MB TCEs, the DMA address for a mapped
page must include the offset of the page relative to the start of its
2MB TCE. The code was incorrectly setting the DMA address to the
beginning of the TCE range, discarding that offset.
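As a concrete illustration of the corrected arithmetic, here is a minimal user-space sketch (not kernel code). It assumes 2MB TCEs, i.e. it_page_shift = 21; the entry and vaddr values are made-up examples, not values taken from the kernel.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int it_page_shift = 21;            /* 2MB TCE */
	uint64_t iommu_page_mask = ~((1ULL << it_page_shift) - 1);
	uint64_t entry = 0x40;                      /* allocated TCE index (example) */
	uint64_t vaddr = 0xc000000012345000ULL;     /* address being mapped (example) */

	/* Buggy form: the DMA address points at the start of the 2MB TCE range. */
	uint64_t dma_buggy = entry << it_page_shift;

	/* Fixed form: carry the offset of vaddr within its 2MB TCE, as the
	 * patched line does with (vaddr & ~IOMMU_PAGE_MASK(tbl)). */
	uint64_t dma_fixed = (entry << it_page_shift) | (vaddr & ~iommu_page_mask);

	printf("buggy: 0x%" PRIx64 "  fixed: 0x%" PRIx64 "\n", dma_buggy, dma_fixed);
	return 0;
}

With these example values, dma_buggy comes out as 0x8000000 while dma_fixed is 0x8145000: the low 21 bits of vaddr survive into the DMA address, so the device sees the data at the same sub-TCE offset the CPU wrote it to.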

The Mellanox driver reports a timeout while trying to ENABLE_HCA for an
SR-IOV ethernet port when the DMA window is backed by 2MB TCEs.

Fixes: 38727311 ("powerps/pseries/dma: Add support for 2M IOMMU page size")
Cc: stable@vger.kernel.org # v5.16+
Signed-off-by: Gaurav Batra <gbatra@linux.vnet.ibm.com>
Reviewed-by: Greg Joyce <gjoyce@linux.vnet.ibm.com>
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230504175913.83844-1-gbatra@linux.vnet.ibm.com
parent ad593827
arch/powerpc/kernel/iommu.c
@@ -518,7 +518,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
 		dma_addr = entry << tbl->it_page_shift;
-		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
+		dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
 
 		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
@@ -905,6 +905,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	unsigned int order;
 	unsigned int nio_pages, io_order;
 	struct page *page;
+	int tcesize = (1 << tbl->it_page_shift);
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
@@ -931,7 +932,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	nio_pages = size >> tbl->it_page_shift;
+	nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
+
 	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> tbl->it_page_shift, io_order, 0);
@@ -939,7 +941,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 		free_pages((unsigned long)ret, order);
 		return NULL;
 	}
-	*dma_handle = mapping;
+
+	*dma_handle = mapping | ((u64)ret & (tcesize - 1));
 	return ret;
 }
 
@@ -950,7 +953,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		unsigned int nio_pages;
 
 		size = PAGE_ALIGN(size);
-		nio_pages = size >> tbl->it_page_shift;
+		nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
 		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));
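The iommu_alloc_coherent()/iommu_free_coherent() hunks fix the same class of problem for coherent allocations. The user-space sketch below shows why the alignment matters; it assumes 2MB TCEs with illustrative sizes and addresses, and iommu_page_align() is a stand-in for the kernel's IOMMU_PAGE_ALIGN() macro, not the real implementation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's IOMMU_PAGE_ALIGN(): round a size up to a
 * multiple of the TCE size. */
static uint64_t iommu_page_align(uint64_t size, unsigned int shift)
{
	uint64_t tcesize = 1ULL << shift;
	return (size + tcesize - 1) & ~(tcesize - 1);
}

int main(void)
{
	unsigned int it_page_shift = 21;        /* 2MB TCE */
	uint64_t size = 16384;                  /* 16KB coherent buffer (example) */

	/* Buggy: 16384 >> 21 == 0, so no IOMMU pages would be mapped or freed. */
	uint64_t nio_buggy = size >> it_page_shift;

	/* Fixed: align to the TCE size first, then count IOMMU pages. */
	uint64_t nio_fixed = iommu_page_align(size, it_page_shift) >> it_page_shift;

	/* The handle fix mirrors *dma_handle = mapping | ((u64)ret & (tcesize - 1)):
	 * the buffer's offset within its 2MB TCE must survive into the handle. */
	uint64_t mapping = 0x8000000ULL;        /* DMA address of the TCE (example) */
	uint64_t ret = 0xc000000000044000ULL;   /* buffer virtual address (example) */
	uint64_t handle = mapping | (ret & ((1ULL << it_page_shift) - 1));

	printf("nio_pages buggy: %" PRIu64 "  fixed: %" PRIu64 "\n",
	       nio_buggy, nio_fixed);
	printf("dma_handle: 0x%" PRIx64 "\n", handle);
	return 0;
}

With these example values this prints nio_pages buggy: 0, fixed: 1, and dma_handle: 0x8044000, matching the intent of the hunks above: sizes smaller than one TCE still consume one IOMMU page, and the handle keeps the buffer's sub-TCE offset.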