Commit 80808d27 authored by Christoph Hellwig, committed by Konrad Rzeszutek Wilk

swiotlb: split swiotlb_tbl_sync_single

Split swiotlb_tbl_sync_single into two separate functions for the to-device
and to-CPU synchronization.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 2bdba622
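
For context, a minimal before/after sketch of how a caller migrates to the split helpers. The wrapper function example_sync_bounce_buffer() is hypothetical and only illustrates the mapping; the helper names and signatures are taken from this patch.

#include <linux/dma-direction.h>
#include <linux/swiotlb.h>

/*
 * Hypothetical caller (not part of this patch).  Before the split, the
 * bounce direction was selected by the extra enum dma_sync_target argument:
 *
 *	swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
 *	swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 *
 * After the split, the sync target is encoded in the function name instead.
 */
static void example_sync_bounce_buffer(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, bool for_device)
{
	if (for_device)
		swiotlb_sync_single_for_device(dev, paddr, size, dir);
	else
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}
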
@@ -750,7 +750,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 	arch_sync_dma_for_cpu(phys, size, dir);
 	if (is_swiotlb_buffer(phys))
-		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
+		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -763,7 +763,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
 	if (is_swiotlb_buffer(phys))
-		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+		swiotlb_sync_single_for_device(dev, phys, size, dir);
 	if (!dev_is_dma_coherent(dev))
 		arch_sync_dma_for_device(phys, size, dir);
@@ -784,8 +784,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 		if (is_swiotlb_buffer(sg_phys(sg)))
-			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
-					dir, SYNC_FOR_CPU);
+			swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
+						    sg->length, dir);
 	}
 }
@@ -801,8 +801,8 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 	for_each_sg(sgl, sg, nelems, i) {
 		if (is_swiotlb_buffer(sg_phys(sg)))
-			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
-					dir, SYNC_FOR_DEVICE);
+			swiotlb_sync_single_for_device(dev, sg_phys(sg),
+						       sg->length, dir);
 		if (!dev_is_dma_coherent(dev))
 			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
...
@@ -462,7 +462,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 	}
 	if (is_xen_swiotlb_buffer(dev, dma_addr))
-		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 }
 static void
@@ -472,7 +472,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 	if (is_xen_swiotlb_buffer(dev, dma_addr))
-		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+		swiotlb_sync_single_for_device(dev, paddr, size, dir);
 	if (!dev_is_dma_coherent(dev)) {
 		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
...
@@ -42,14 +42,6 @@ extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 extern void __init swiotlb_update_mem_attributes(void);
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-	SYNC_FOR_CPU = 0,
-	SYNC_FOR_DEVICE = 1,
-};
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
 		size_t mapping_size, size_t alloc_size,
 		enum dma_data_direction dir, unsigned long attrs);
@@ -60,11 +52,10 @@ extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     enum dma_data_direction dir,
 				     unsigned long attrs);
-extern void swiotlb_tbl_sync_single(struct device *hwdev,
-				    phys_addr_t tlb_addr,
-				    size_t size, enum dma_data_direction dir,
-				    enum dma_sync_target target);
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+		size_t size, enum dma_data_direction dir);
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+		size_t size, enum dma_data_direction dir);
 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs);
...
@@ -344,8 +344,8 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 		if (unlikely(is_swiotlb_buffer(paddr)))
-			swiotlb_tbl_sync_single(dev, paddr, sg->length,
-					dir, SYNC_FOR_DEVICE);
+			swiotlb_sync_single_for_device(dev, paddr, sg->length,
+						       dir);
 		if (!dev_is_dma_coherent(dev))
 			arch_sync_dma_for_device(paddr, sg->length,
@@ -370,8 +370,8 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 		arch_sync_dma_for_cpu(paddr, sg->length, dir);
 		if (unlikely(is_swiotlb_buffer(paddr)))
-			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
-					SYNC_FOR_CPU);
+			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
+						    dir);
 		if (dir == DMA_FROM_DEVICE)
 			arch_dma_mark_clean(paddr, sg->length);
...
@@ -57,7 +57,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev,
 	phys_addr_t paddr = dma_to_phys(dev, addr);
 	if (unlikely(is_swiotlb_buffer(paddr)))
-		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+		swiotlb_sync_single_for_device(dev, paddr, size, dir);
 	if (!dev_is_dma_coherent(dev))
 		arch_sync_dma_for_device(paddr, size, dir);
@@ -74,7 +74,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
 	}
 	if (unlikely(is_swiotlb_buffer(paddr)))
-		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 	if (dir == DMA_FROM_DEVICE)
 		arch_dma_mark_clean(paddr, size);
...
@@ -715,26 +715,22 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
-void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
-			     size_t size, enum dma_data_direction dir,
-			     enum dma_sync_target target)
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+		size_t size, enum dma_data_direction dir)
 {
-	switch (target) {
-	case SYNC_FOR_CPU:
-		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(hwdev, tlb_addr, size, DMA_FROM_DEVICE);
-		else
-			BUG_ON(dir != DMA_TO_DEVICE);
-		break;
-	case SYNC_FOR_DEVICE:
-		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			swiotlb_bounce(hwdev, tlb_addr, size, DMA_TO_DEVICE);
-		else
-			BUG_ON(dir != DMA_FROM_DEVICE);
-		break;
-	default:
-		BUG();
-	}
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
+	else
+		BUG_ON(dir != DMA_FROM_DEVICE);
+}
+
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
+	else
+		BUG_ON(dir != DMA_TO_DEVICE);
 }
 /*
...