Commit 063b8271 authored by Christoph Hellwig, committed by Konrad Rzeszutek Wilk

swiotlb-xen: ensure we have a single callsite for xen_dma_map_page

Refactor the code a bit to make further changes easier.
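
The shape of the refactor, as a minimal standalone C sketch (illustrative only: fast_path_ok(), bounce_buffer_map() and flush_caches() are hypothetical stand-ins, and a plain int stands in for the real address bookkeeping). Previously both exit paths called the cache-flush helper themselves; afterwards every successful path funnels through one callsite behind a done label:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the real helpers. */
    static bool fast_path_ok(int addr)      { return addr < 100; }
    static int  bounce_buffer_map(int addr) { return addr % 100; }

    /* Plays the role of xen_dma_map_page: called only for its side effects. */
    static void flush_caches(int addr)
    {
            printf("cache flush for %d\n", addr);
    }

    static int map_page(int addr)
    {
            if (fast_path_ok(addr))
                    goto done;      /* was: flush_caches(addr); return addr; */

            /* Slow path: remap through a (simulated) bounce buffer. */
            addr = bounce_buffer_map(addr);
    done:
            /* The single callsite every successful path now funnels through. */
            flush_caches(addr);
            return addr;
    }

    int main(void)
    {
            printf("%d\n", map_page(42));   /* fast path */
            printf("%d\n", map_page(4242)); /* bounce path */
            return 0;
    }

With a single callsite, a later change to how the helper is invoked (the "further changes" mentioned above) only has to touch one spot instead of two.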
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 2e12dcee
drivers/xen/swiotlb-xen.c

@@ -388,13 +388,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-		(swiotlb_force != SWIOTLB_FORCE)) {
-		/* we are not interested in the dma_addr returned by
-		 * xen_dma_map_page, only in the potential cache flushes executed
-		 * by the function. */
-		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
-		return dev_addr;
-	}
+	    swiotlb_force != SWIOTLB_FORCE)
+		goto done;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
@@ -407,19 +402,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
-	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+	if (unlikely(!dma_capable(dev, dev_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
+				attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DMA_MAPPING_ERROR;
+	}
 
-	return DMA_MAPPING_ERROR;
+	page = pfn_to_page(map >> PAGE_SHIFT);
+	offset = map & ~PAGE_MASK;
+done:
+	/*
+	 * we are not interested in the dma_addr returned by xen_dma_map_page,
+	 * only in the potential cache flushes executed by the function.
+	 */
+	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	return dev_addr;
 }
 
 /*