Commit 65a21b71 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Michael Ellerman

powerpc/dma: remove dma_nommu_dma_supported

This function is largely identical to the generic version used
everywhere else.  Replace it with the generic version.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 5a47910d
@@ -31,7 +31,6 @@ int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
 dma_addr_t dma_nommu_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
-int dma_nommu_dma_supported(struct device *dev, u64 mask);
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
...
@@ -21,7 +21,7 @@
 static inline bool dma_iommu_alloc_bypass(struct device *dev)
 {
 	return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
-		dma_nommu_dma_supported(dev, dev->coherent_dma_mask);
+		dma_direct_supported(dev, dev->coherent_dma_mask);
 }

 static inline bool dma_iommu_map_bypass(struct device *dev,
...
@@ -39,29 +39,6 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
 	return pfn;
 }
/*
 * Report whether the device can reach all of system RAM with the given
 * DMA mask.  Returns 1 when the mask is usable, 0 when it is not.
 */
int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifndef CONFIG_PPC64
	/* 32-bit builds assume any mask covers all addressable memory. */
	return 1;
#else
	/* Highest bus address the device would ever have to address. */
	u64 limit = phys_to_dma(dev, (memblock_end_of_DRAM() - 1));

	/* All of RAM lies under the mask, we are good */
	if (mask >= limit)
		return 1;

#ifdef CONFIG_FSL_SOC
	/*
	 * Freescale gets another chance via ZONE_DMA, however
	 * that will have to be refined if/when they support iommus
	 */
	return 1;
#endif

	/* Sorry ... */
	return 0;
#endif
}
 #ifndef CONFIG_NOT_COHERENT_CACHE
 void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flag,
@@ -190,7 +167,7 @@ const struct dma_map_ops dma_nommu_ops = {
 	.free = __dma_nommu_free_coherent,
 	.map_sg = dma_nommu_map_sg,
 	.unmap_sg = dma_nommu_unmap_sg,
-	.dma_supported = dma_nommu_dma_supported,
+	.dma_supported = dma_direct_supported,
 	.map_page = dma_nommu_map_page,
 	.unmap_page = dma_nommu_unmap_page,
 	.get_required_mask = dma_direct_get_required_mask,
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.