Commit 5a47910d authored by Christoph Hellwig, committed by Michael Ellerman

powerpc/dma: remove dma_nommu_get_required_mask

This function is identical to the generic dma_direct_get_required_mask,
except that the generic version also takes bus_dma_mask into account,
so the powerpc version could return incorrect results.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 6666cc17
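
For context, the mask math the two helpers share rounds the highest DMA-reachable address up to the next power of two, minus one; per the commit message, the generic dma_direct_get_required_mask additionally takes bus_dma_mask into account, which is what the powerpc copy was missing. Below is a minimal user-space sketch of that math, following the body of the removed powerpc function; fls64_sketch() is a hypothetical stand-in for the kernel's fls64(), and the example end address is made up.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's fls64(): 1-based index of the
 * most significant set bit, 0 if no bit is set. */
static int fls64_sketch(uint64_t x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

/* Same math as the removed dma_nommu_get_required_mask(): take the end
 * of DRAM (plus any device DMA offset) and widen it to an all-ones mask. */
static uint64_t required_mask(uint64_t end)
{
	uint64_t mask = 1ULL << (fls64_sketch(end) - 1);

	mask += mask - 1;
	return mask;
}

int main(void)
{
	/* Made-up example: DRAM ends at 4.5 GiB, so 33 address bits are
	 * needed and the required mask is 0x1ffffffff. */
	printf("0x%llx\n", (unsigned long long)required_mask(0x120000000ULL));
	return 0;
}
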
@@ -32,7 +32,6 @@ dma_addr_t dma_nommu_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
 int dma_nommu_dma_supported(struct device *dev, u64 mask);
-u64 dma_nommu_get_required_mask(struct device *dev);
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
...
@@ -152,7 +152,7 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 		return 0;

 	if (dev_is_pci(dev)) {
-		u64 bypass_mask = dma_nommu_get_required_mask(dev);
+		u64 bypass_mask = dma_direct_get_required_mask(dev);

 		if (dma_iommu_bypass_supported(dev, bypass_mask))
 			return bypass_mask;
...
@@ -145,18 +145,6 @@ static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }

-u64 dma_nommu_get_required_mask(struct device *dev)
-{
-	u64 end, mask;
-
-	end = memblock_end_of_DRAM() + get_dma_offset(dev);
-
-	mask = 1ULL << (fls64(end) - 1);
-	mask += mask - 1;
-
-	return mask;
-}
-
 dma_addr_t dma_nommu_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
@@ -205,7 +193,7 @@ const struct dma_map_ops dma_nommu_ops = {
 	.dma_supported		= dma_nommu_dma_supported,
 	.map_page		= dma_nommu_map_page,
 	.unmap_page		= dma_nommu_unmap_page,
-	.get_required_mask	= dma_nommu_get_required_mask,
+	.get_required_mask	= dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	.sync_single_for_cpu	= dma_nommu_sync_single,
 	.sync_single_for_device = dma_nommu_sync_single,
...