Commit e7284982 authored by Christoph Hellwig, committed by Michael Ellerman

powerpc/dma: remove the iommu fallback for coherent allocations

All iommu-capable platforms now always use the iommu code with the
internal bypass, so there is no need for this magic anymore.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 662acad4
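For context, the reason this fallback is dead code: an earlier patch in the same series taught dma_iommu_ops to bypass the iommu internally when the device's mask covers all of memory. A rough sketch of that allocation path follows; the helper name dma_iommu_alloc_bypass() is recalled from that series, not quoted from this diff.

/* Sketch of the iommu-side allocation with internal bypass (from the
 * companion patch in this series; helper names are approximate).
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      unsigned long attrs)
{
        /* Mask covers all of RAM: skip the iommu, allocate directly. */
        if (dma_iommu_alloc_bypass(dev))
                return __dma_nommu_alloc_coherent(dev, size, dma_handle,
                                                  flag, attrs);

        /* Otherwise allocate and map through the iommu table. */
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                    dma_handle, dev->coherent_dma_mask,
                                    flag, dev_to_node(dev));
}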
arch/powerpc/Kconfig:

@@ -119,9 +119,6 @@ config GENERIC_HWEIGHT
         bool
         default y
 
-config ARCH_HAS_DMA_SET_COHERENT_MASK
-        bool
-
 config PPC
         bool
         default y
@@ -130,7 +127,6 @@ config PPC
         #
         select ARCH_HAS_DEBUG_VIRTUAL
         select ARCH_HAS_DEVMEM_IS_ALLOWED
-        select ARCH_HAS_DMA_SET_COHERENT_MASK
         select ARCH_HAS_ELF_RANDOMIZE
         select ARCH_HAS_FORTIFY_SOURCE
         select ARCH_HAS_GCOV_PROFILE_ALL
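With the select gone, powerpc picks up the generic dma_set_coherent_mask() instead of providing its own. At the time of this commit the generic version in include/linux/dma-mapping.h was, paraphrased:

/* Generic fallback compiled in when the architecture does not select
 * ARCH_HAS_DMA_SET_COHERENT_MASK (paraphrased, not part of this diff).
 */
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;
        dev->coherent_dma_mask = mask;
        return 0;
}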
arch/powerpc/kernel/dma.c:

@@ -115,51 +115,6 @@ void __dma_nommu_free_coherent(struct device *dev, size_t size,
 }
 #endif /* !CONFIG_NOT_COHERENT_CACHE */
 
-static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t flag,
-                                      unsigned long attrs)
-{
-        struct iommu_table *iommu;
-
-        /* The coherent mask may be smaller than the real mask, check if
-         * we can really use the direct ops
-         */
-        if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
-                return __dma_nommu_alloc_coherent(dev, size, dma_handle,
-                                                  flag, attrs);
-
-        /* Ok we can't ... do we have an iommu ? If not, fail */
-        iommu = get_iommu_table_base(dev);
-        if (!iommu)
-                return NULL;
-
-        /* Try to use the iommu */
-        return iommu_alloc_coherent(dev, iommu, size, dma_handle,
-                                    dev->coherent_dma_mask, flag,
-                                    dev_to_node(dev));
-}
-
-static void dma_nommu_free_coherent(struct device *dev, size_t size,
-                                    void *vaddr, dma_addr_t dma_handle,
-                                    unsigned long attrs)
-{
-        struct iommu_table *iommu;
-
-        /* See comments in dma_nommu_alloc_coherent() */
-        if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
-                return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
-                                                 attrs);
-
-        /* Maybe we used an iommu ... */
-        iommu = get_iommu_table_base(dev);
-
-        /* If we hit that we should have never allocated in the first
-         * place so how come we are freeing ?
-         */
-        if (WARN_ON(!iommu))
-                return;
-
-        iommu_free_coherent(iommu, size, vaddr, dma_handle);
-}
-
 int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             unsigned long attrs)
@@ -262,8 +217,8 @@ static inline void dma_nommu_sync_single(struct device *dev,
 #endif
 
 const struct dma_map_ops dma_nommu_ops = {
-        .alloc                          = dma_nommu_alloc_coherent,
-        .free                           = dma_nommu_free_coherent,
+        .alloc                          = __dma_nommu_alloc_coherent,
+        .free                           = __dma_nommu_free_coherent,
         .mmap                           = dma_nommu_mmap_coherent,
         .map_sg                         = dma_nommu_map_sg,
         .unmap_sg                       = dma_nommu_unmap_sg,
@@ -280,25 +235,6 @@ const struct dma_map_ops dma_nommu_ops = {
 };
 EXPORT_SYMBOL(dma_nommu_ops);
 
-int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
-        if (!dma_supported(dev, mask)) {
-                /*
-                 * We need to special case the direct DMA ops which can
-                 * support a fallback for coherent allocations. There
-                 * is no dma_op->set_coherent_mask() so we have to do
-                 * things the hard way:
-                 */
-                if (get_dma_ops(dev) != &dma_nommu_ops ||
-                    get_iommu_table_base(dev) == NULL ||
-                    !dma_iommu_dma_supported(dev, mask))
-                        return -EIO;
-        }
-        dev->coherent_dma_mask = mask;
-        return 0;
-}
-EXPORT_SYMBOL(dma_set_coherent_mask);
-
 int dma_set_mask(struct device *dev, u64 dma_mask)
 {
         if (ppc_md.dma_set_mask)
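Taken together, a driver's coherent allocation on an iommu-capable platform now flows through the generic mask check and dma_iommu_ops, with no dma_nommu_* wrapper in between. A hypothetical driver fragment (the device, mask value, and function name are illustrative, not from this commit; assumes <linux/pci.h> and <linux/dma-mapping.h>):

/* Illustrative only: shows the post-patch call path. */
static int example_setup_dma(struct pci_dev *pdev, void **buf,
                             dma_addr_t *handle)
{
        /* Now validated by the generic dma_set_coherent_mask(). */
        if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
                return -EIO;

        /* Routed through dma_iommu_ops, which falls back to the direct
         * allocator when the mask covers all of memory.
         */
        *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, handle, GFP_KERNEL);
        return *buf ? 0 : -ENOMEM;
}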