Commit 76418421 authored by Alexander Duyck, committed by Konrad Rzeszutek Wilk

swiotlb-xen: Enforce return of DMA_ERROR_CODE in mapping function

The mapping function should always return DMA_ERROR_CODE when a mapping has
failed, as this is what the DMA API expects when a DMA error has occurred.
The current function for mapping a page in Xen was returning either
DMA_ERROR_CODE or 0, depending on where it failed.
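
For illustration, a condensed sketch of the two failure paths in the old
xen_swiotlb_map_page() (simplified, not the verbatim source):

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;		/* failure path #1 */

	dev_addr = xen_phys_to_bus(map);
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;			/* failure path #2: plain 0 */
	}
	return dev_addr;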

On x86 DMA_ERROR_CODE is 0, but on other architectures such as ARM it is
~0. We need to make sure we return the same error value whether the
mapping failed or the device is not capable of accessing the mapping.
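
For reference, the per-architecture definitions at the time were along
these lines (simplified from the kernel headers of that era):

	/* arch/x86/include/asm/dma-mapping.h */
	#define DMA_ERROR_CODE	0

	/* arch/arm/include/asm/dma-mapping.h */
	#define DMA_ERROR_CODE	(~0)

On ARM a returned 0 therefore looks like a valid bus address, and a failed
mapping would slip past a dma_mapping_error() check.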

If we are returning DMA_ERROR_CODE as our error value, we can drop the
function for checking the error code, as the default is to compare the
return value against DMA_ERROR_CODE when no mapping_error function is
defined.
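
A condensed sketch of that default, following the shape of
dma_mapping_error() in the per-architecture headers of that era (debug
hooks elided):

	static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		if (ops->mapping_error)
			return ops->mapping_error(dev, dma_addr);

		return dma_addr == DMA_ERROR_CODE;
	}

From a driver's point of view the error check is unchanged; a typical
(hypothetical) caller does:

	addr = dma_map_page(dev, page, offset, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;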

Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad@kernel.org>
parent ebcf6f97
@@ -186,7 +186,6 @@ struct dma_map_ops *xen_dma_ops;
 EXPORT_SYMBOL(xen_dma_ops);
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
-	.mapping_error = xen_swiotlb_dma_mapping_error,
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
@@ -19,7 +19,6 @@
 int xen_swiotlb __read_mostly;
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
-	.mapping_error = xen_swiotlb_dma_mapping_error,
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
@@ -416,11 +416,12 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		dev_addr = 0;
-	}
-	return dev_addr;
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
+
+	swiotlb_tbl_unmap_single(dev, map, size, dir);
+
+	return DMA_ERROR_CODE;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
@@ -648,13 +649,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
 
-int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return !dma_addr;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly. For example, if your device can only drive the low 24-bits
@@ -50,9 +50,6 @@ extern void
 xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			       int nelems, enum dma_data_direction dir);
 
-extern int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
 extern int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);