Commit 69b052e8 authored by Robert Jennings, committed by Benjamin Herrenschmidt

powerpc/pseries: Correct VIO bus accounting problem in CMO env.

In the VIO bus code, the wrappers for the dma alloc_coherent and free_coherent
calls round the requested size to IOMMU_PAGE_SIZE, but the underlying calls
promote the actual mapping to PAGE_SIZE.  Changing the rounding in these two
functions fixes the under-reporting of the entitlement used by the system.
Without this change, the system could run out of entitlement before it
believes it has, incurring mapping failures at the firmware level.
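
The size of the gap is easiest to see with numbers.  Below is a minimal
standalone sketch, not kernel code: it assumes the common pseries
configuration of 4KB IOMMU pages and a 64KB system page size, and
SYS_PAGE_SIZE is a stand-in name for the kernel's PAGE_SIZE.

    /* Sketch of the accounting gap, assuming 4KB IOMMU pages and a
     * 64KB system page size.  roundup() mirrors the kernel macro. */
    #include <stdio.h>

    #define IOMMU_PAGE_SIZE  (4UL * 1024)
    #define SYS_PAGE_SIZE    (64UL * 1024)   /* stand-in for PAGE_SIZE */

    /* round x up to the next multiple of y */
    #define roundup(x, y)  ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned long size = 6000;  /* hypothetical alloc_coherent request */

            unsigned long accounted = roundup(size, IOMMU_PAGE_SIZE); /* 8192  */
            unsigned long mapped    = roundup(size, SYS_PAGE_SIZE);   /* 65536 */

            printf("accounted %lu, actually mapped %lu, unaccounted %lu\n",
                   accounted, mapped, mapped - accounted);
            return 0;
    }

With these page sizes, each allocation accounted at IOMMU_PAGE_SIZE
granularity can leave up to PAGE_SIZE - IOMMU_PAGE_SIZE bytes of
entitlement unreported.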

Also in the VIO bus code, the wrapper for dma map_sg does not exit in an
error path where it should.  Rather than falling through to the code for the
success case, this patch adds the return that is needed in the error path.
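
The shape of that control-flow fix can be sketched standalone; hw_map_sg,
cmo_dealloc, and count_alloc_failure below are hypothetical stand-ins for
the real helpers, not kernel APIs.

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for the real kernel helpers. */
    static int  hw_map_sg(void)           { return 0; /* simulate failure */ }
    static void cmo_dealloc(size_t n)     { printf("returned %zu bytes\n", n); }
    static void count_alloc_failure(void) { printf("allocs_failed++\n"); }

    static int map_sg_wrapper(size_t alloc_size)
    {
            int ret = hw_map_sg();
            if (ret == 0) {              /* zero mapped entries == failure */
                    cmo_dealloc(alloc_size);
                    count_alloc_failure();
                    return ret;          /* the fix: without this return,
                                          * control fell through to the
                                          * success path below */
            }
            /* success path: adjust accounting to what was actually mapped,
             * then walk the scatterlist (elided) */
            return ret;
    }

    int main(void)
    {
            (void)map_sg_wrapper(8192);
            return 0;
    }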
Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 4712fff9
@@ -492,14 +492,14 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
 	struct vio_dev *viodev = to_vio_dev(dev);
 	void *ret;
 
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
 		atomic_inc(&viodev->cmo.allocs_failed);
 		return NULL;
 	}
 
 	ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
 	if (unlikely(ret == NULL)) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
 	}
 
@@ -513,7 +513,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
 
 	dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
 
-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 }
 
 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
@@ -572,6 +572,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	if (unlikely(!ret)) {
 		vio_cmo_dealloc(viodev, alloc_size);
 		atomic_inc(&viodev->cmo.allocs_failed);
+		return ret;
 	}
 
 	for (sgl = sglist, count = 0; count < ret; count++, sgl++)