Commit d7dff840 authored by FUJITA Tomonori's avatar FUJITA Tomonori Committed by Ingo Molnar

x86: remove map_single and unmap_single in struct dma_mapping_ops

This patch converts dma_map_single and dma_unmap_single to use
map_page and unmap_page respectively and removes unnecessary
map_single and unmap_single in struct dma_mapping_ops.

This leaves intel-iommu's dma_map_single and dma_unmap_single since
IA64 uses them. They will be removed after the unification.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 33feffd4
...@@ -24,10 +24,6 @@ struct dma_mapping_ops { ...@@ -24,10 +24,6 @@ struct dma_mapping_ops {
dma_addr_t *dma_handle, gfp_t gfp); dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size, void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle); void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
size_t size, int direction);
void (*unmap_single)(struct device *dev, dma_addr_t addr,
size_t size, int direction);
void (*sync_single_for_cpu)(struct device *hwdev, void (*sync_single_for_cpu)(struct device *hwdev,
dma_addr_t dma_handle, size_t size, dma_addr_t dma_handle, size_t size,
int direction); int direction);
...@@ -103,7 +99,9 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size, ...@@ -103,7 +99,9 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
struct dma_mapping_ops *ops = get_dma_ops(hwdev); struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction)); BUG_ON(!valid_dma_direction(direction));
return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); return ops->map_page(hwdev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
direction, NULL);
} }
static inline void static inline void
...@@ -113,8 +111,8 @@ dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, ...@@ -113,8 +111,8 @@ dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
struct dma_mapping_ops *ops = get_dma_ops(dev); struct dma_mapping_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(direction)); BUG_ON(!valid_dma_direction(direction));
if (ops->unmap_single) if (ops->unmap_page)
ops->unmap_single(dev, addr, size, direction); ops->unmap_page(dev, addr, size, direction, NULL);
} }
static inline int static inline int
...@@ -221,8 +219,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, ...@@ -221,8 +219,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
struct dma_mapping_ops *ops = get_dma_ops(dev); struct dma_mapping_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(direction)); BUG_ON(!valid_dma_direction(direction));
return ops->map_single(dev, page_to_phys(page) + offset, return ops->map_page(dev, page, offset, size, direction, NULL);
size, direction);
} }
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
......
...@@ -1341,13 +1341,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page, ...@@ -1341,13 +1341,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
return addr; return addr;
} }
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
size_t size, int dir)
{
return map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
paddr & ~PAGE_MASK, size, dir, NULL);
}
/* /*
* The exported unmap_single function for dma_ops. * The exported unmap_single function for dma_ops.
*/ */
...@@ -1378,12 +1371,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, ...@@ -1378,12 +1371,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
} }
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, int dir)
{
return unmap_page(dev, dma_addr, size, dir, NULL);
}
/* /*
* This is a special map_sg function which is used if we should map a * This is a special map_sg function which is used if we should map a
* device which is not handled by an AMD IOMMU in the system. * device which is not handled by an AMD IOMMU in the system.
...@@ -1664,8 +1651,6 @@ static void prealloc_protection_domains(void) ...@@ -1664,8 +1651,6 @@ static void prealloc_protection_domains(void)
static struct dma_mapping_ops amd_iommu_dma_ops = { static struct dma_mapping_ops amd_iommu_dma_ops = {
.alloc_coherent = alloc_coherent, .alloc_coherent = alloc_coherent,
.free_coherent = free_coherent, .free_coherent = free_coherent,
.map_single = map_single,
.unmap_single = unmap_single,
.map_page = map_page, .map_page = map_page,
.unmap_page = unmap_page, .unmap_page = unmap_page,
.map_sg = map_sg, .map_sg = map_sg,
......
...@@ -461,14 +461,6 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page, ...@@ -461,14 +461,6 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
return iommu_alloc(dev, tbl, vaddr, npages, dir); return iommu_alloc(dev, tbl, vaddr, npages, dir);
} }
static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
size_t size, int direction)
{
return calgary_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
paddr & ~PAGE_MASK, size,
direction, NULL);
}
static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) struct dma_attrs *attrs)
...@@ -480,12 +472,6 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, ...@@ -480,12 +472,6 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
iommu_free(tbl, dma_addr, npages); iommu_free(tbl, dma_addr, npages);
} }
static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
size_t size, int direction)
{
calgary_unmap_page(dev, dma_handle, size, direction, NULL);
}
static void* calgary_alloc_coherent(struct device *dev, size_t size, static void* calgary_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag) dma_addr_t *dma_handle, gfp_t flag)
{ {
...@@ -535,8 +521,6 @@ static void calgary_free_coherent(struct device *dev, size_t size, ...@@ -535,8 +521,6 @@ static void calgary_free_coherent(struct device *dev, size_t size,
static struct dma_mapping_ops calgary_dma_ops = { static struct dma_mapping_ops calgary_dma_ops = {
.alloc_coherent = calgary_alloc_coherent, .alloc_coherent = calgary_alloc_coherent,
.free_coherent = calgary_free_coherent, .free_coherent = calgary_free_coherent,
.map_single = calgary_map_single,
.unmap_single = calgary_unmap_single,
.map_sg = calgary_map_sg, .map_sg = calgary_map_sg,
.unmap_sg = calgary_unmap_sg, .unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page, .map_page = calgary_map_page,
......
...@@ -275,13 +275,6 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page, ...@@ -275,13 +275,6 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page,
return bus; return bus;
} }
static dma_addr_t gart_map_single(struct device *dev, phys_addr_t paddr,
size_t size, int dir)
{
return gart_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
paddr & ~PAGE_MASK, size, dir, NULL);
}
/* /*
* Free a DMA mapping. * Free a DMA mapping.
*/ */
...@@ -306,12 +299,6 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, ...@@ -306,12 +299,6 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
free_iommu(iommu_page, npages); free_iommu(iommu_page, npages);
} }
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, int direction)
{
gart_unmap_page(dev, dma_addr, size, direction, NULL);
}
/* /*
* Wrapper for pci_unmap_single working with scatterlists. * Wrapper for pci_unmap_single working with scatterlists.
*/ */
...@@ -324,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) ...@@ -324,7 +311,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
for_each_sg(sg, s, nents, i) { for_each_sg(sg, s, nents, i) {
if (!s->dma_length || !s->length) if (!s->dma_length || !s->length)
break; break;
gart_unmap_single(dev, s->dma_address, s->dma_length, dir); gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
} }
} }
...@@ -538,7 +525,7 @@ static void ...@@ -538,7 +525,7 @@ static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr, gart_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr) dma_addr_t dma_addr)
{ {
gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL); gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long)vaddr, get_order(size)); free_pages((unsigned long)vaddr, get_order(size));
} }
...@@ -725,8 +712,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info) ...@@ -725,8 +712,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
} }
static struct dma_mapping_ops gart_dma_ops = { static struct dma_mapping_ops gart_dma_ops = {
.map_single = gart_map_single,
.unmap_single = gart_unmap_single,
.map_sg = gart_map_sg, .map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg, .unmap_sg = gart_unmap_sg,
.map_page = gart_map_page, .map_page = gart_map_page,
......
...@@ -38,13 +38,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, ...@@ -38,13 +38,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
return bus; return bus;
} }
static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
size_t size, int direction)
{
return nommu_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
paddr & ~PAGE_MASK, size, direction, NULL);
}
/* Map a set of buffers described by scatterlist in streaming /* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the * mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list * above pci_map_single interface. Here the scatter gather list
...@@ -88,7 +81,6 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, ...@@ -88,7 +81,6 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
struct dma_mapping_ops nommu_dma_ops = { struct dma_mapping_ops nommu_dma_ops = {
.alloc_coherent = dma_generic_alloc_coherent, .alloc_coherent = dma_generic_alloc_coherent,
.free_coherent = nommu_free_coherent, .free_coherent = nommu_free_coherent,
.map_single = nommu_map_single,
.map_sg = nommu_map_sg, .map_sg = nommu_map_sg,
.map_page = nommu_map_page, .map_page = nommu_map_page,
.is_phys = 1, .is_phys = 1,
......
...@@ -38,13 +38,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) ...@@ -38,13 +38,6 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
return 0; return 0;
} }
static dma_addr_t
swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
int direction)
{
return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
}
/* these will be moved to lib/swiotlb.c later on */ /* these will be moved to lib/swiotlb.c later on */
static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
...@@ -78,8 +71,6 @@ struct dma_mapping_ops swiotlb_dma_ops = { ...@@ -78,8 +71,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error, .mapping_error = swiotlb_dma_mapping_error,
.alloc_coherent = x86_swiotlb_alloc_coherent, .alloc_coherent = x86_swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent, .free_coherent = swiotlb_free_coherent,
.map_single = swiotlb_map_single_phys,
.unmap_single = swiotlb_unmap_single,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device, .sync_single_for_device = swiotlb_sync_single_for_device,
.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
......
...@@ -2582,8 +2582,6 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, ...@@ -2582,8 +2582,6 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
static struct dma_mapping_ops intel_dma_ops = { static struct dma_mapping_ops intel_dma_ops = {
.alloc_coherent = intel_alloc_coherent, .alloc_coherent = intel_alloc_coherent,
.free_coherent = intel_free_coherent, .free_coherent = intel_free_coherent,
.map_single = intel_map_single,
.unmap_single = intel_unmap_single,
.map_sg = intel_map_sg, .map_sg = intel_map_sg,
.unmap_sg = intel_unmap_sg, .unmap_sg = intel_unmap_sg,
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment