Commit 249baa54 authored by Christoph Hellwig

dma-mapping: provide a better default ->get_required_mask

Most dma_map_ops instances are IOMMUs that work perfectly fine with 32 bits
of IOVA space, and the generic direct mapping code already provides its own
routine that is intelligent about the amount of memory actually present.
Wire up the dma-direct routine for the ARM direct mapping code as well, and
otherwise default to the constant 32-bit mask.  This way we only need to
override it for the occasional odd IOMMU that requires 64-bit IOVA support,
or for IOMMU drivers that are more efficient if they can fall back to the
direct mapping.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent d9295532
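
The practical effect of the new default is easiest to see from the consumer side. Below is a minimal, hedged sketch (not part of this commit) of how a driver typically uses dma_get_required_mask() to decide whether enabling 64-bit addressing is worthwhile. The foo_setup_dma() helper and the idea of a "64-bit descriptor path" are hypothetical; dma_get_required_mask(), dma_set_mask_and_coherent() and DMA_BIT_MASK() are the existing kernel API.

#include <linux/dma-mapping.h>

/* Hypothetical driver helper; "foo" is a made-up device. */
static int foo_setup_dma(struct device *dev)
{
        /*
         * Ask how large a mask is needed to address all memory that may be
         * handed to this device.  After this commit, devices behind an IOMMU
         * simply see DMA_BIT_MASK(32) here unless their dma_map_ops supply a
         * smarter ->get_required_mask().
         */
        u64 required = dma_get_required_mask(dev);

        /* Only pay for wider descriptors when they are actually needed. */
        if (required > DMA_BIT_MASK(32) &&
            !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;       /* hypothetical 64-bit descriptor path */

        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
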
@@ -14,6 +14,7 @@
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/device.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/dma-contiguous.h>
@@ -192,6 +193,7 @@ const struct dma_map_ops arm_dma_ops = {
         .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
         .sync_sg_for_device = arm_dma_sync_sg_for_device,
         .dma_supported = arm_dma_supported,
+        .get_required_mask = dma_direct_get_required_mask,
 };
 EXPORT_SYMBOL(arm_dma_ops);
@@ -212,6 +214,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
         .map_sg = arm_dma_map_sg,
         .map_resource = dma_direct_map_resource,
         .dma_supported = arm_dma_supported,
+        .get_required_mask = dma_direct_get_required_mask,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -686,18 +686,12 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
         return mask >= DMA_BIT_MASK(32);
 }
 
-static u64 ps3_dma_get_required_mask(struct device *_dev)
-{
-        return DMA_BIT_MASK(32);
-}
-
 static const struct dma_map_ops ps3_sb_dma_ops = {
         .alloc = ps3_alloc_coherent,
         .free = ps3_free_coherent,
         .map_sg = ps3_sb_map_sg,
         .unmap_sg = ps3_sb_unmap_sg,
         .dma_supported = ps3_dma_supported,
-        .get_required_mask = ps3_dma_get_required_mask,
         .map_page = ps3_sb_map_page,
         .unmap_page = ps3_unmap_page,
         .mmap = dma_common_mmap,
@@ -710,7 +704,6 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = {
         .map_sg = ps3_ioc0_map_sg,
         .unmap_sg = ps3_ioc0_unmap_sg,
         .dma_supported = ps3_dma_supported,
-        .get_required_mask = ps3_dma_get_required_mask,
         .map_page = ps3_ioc0_map_page,
         .unmap_page = ps3_unmap_page,
         .mmap = dma_common_mmap,
@@ -680,6 +680,7 @@ static const struct dma_map_ops gart_dma_ops = {
         .mmap = dma_common_mmap,
         .get_sgtable = dma_common_get_sgtable,
         .dma_supported = dma_direct_supported,
+        .get_required_mask = dma_direct_get_required_mask,
 };
 
 static void gart_iommu_shutdown(void)
@@ -271,25 +271,6 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL(dma_mmap_attrs);
 
-static u64 dma_default_get_required_mask(struct device *dev)
-{
-        u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
-        u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
-        u64 mask;
-
-        if (!high_totalram) {
-                /* convert to mask just covering totalram */
-                low_totalram = (1 << (fls(low_totalram) - 1));
-                low_totalram += low_totalram - 1;
-                mask = low_totalram;
-        } else {
-                high_totalram = (1 << (fls(high_totalram) - 1));
-                high_totalram += high_totalram - 1;
-                mask = (((u64)high_totalram) << 32) + 0xffffffff;
-        }
-        return mask;
-}
-
 u64 dma_get_required_mask(struct device *dev)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -298,7 +279,16 @@ u64 dma_get_required_mask(struct device *dev)
                 return dma_direct_get_required_mask(dev);
         if (ops->get_required_mask)
                 return ops->get_required_mask(dev);
-        return dma_default_get_required_mask(dev);
+
+        /*
+         * We require every DMA ops implementation to at least support a 32-bit
+         * DMA mask (and use bounce buffering if that isn't supported in
+         * hardware).  As the direct mapping code has its own routine to
+         * actually report an optimal mask we default to 32-bit here as that
+         * is the right thing for most IOMMUs, and at least not actively
+         * harmful in general.
+         */
+        return DMA_BIT_MASK(32);
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
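
For contrast with the removed dma_default_get_required_mask() above, here is a minimal, hedged sketch of the kind of computation the dma-direct ->get_required_mask() path performs instead: derive the mask from the highest page frame actually present rather than assuming 32 bits. The helper name example_required_mask_from_memory() is made up for illustration; max_pfn, PAGE_SHIFT and fls64() are the usual kernel symbols the removed code also relied on, and the real dma-direct routine additionally translates the physical address into a bus (DMA) address before sizing the mask.

/* Illustrative only; not the actual dma-direct implementation. */
static u64 example_required_mask_from_memory(void)
{
        /* Byte address of the start of the last page frame of RAM. */
        u64 max_addr = ((u64)max_pfn - 1) << PAGE_SHIFT;

        /*
         * Smallest mask of the form 2^n - 1 that still covers max_addr,
         * written so the shift never reaches 64 bits.
         */
        return (1ULL << (fls64(max_addr) - 1)) * 2 - 1;
}
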