Commit ac2e8860 authored by Christoph Hellwig

mips: use swiotlb_{alloc,free}

These already include the GFP_DMA/GFP_DMA32 usage, and will use CMA
memory if enabled, thus avoiding the GFP_NORETRY hack.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian König <christian.koenig@amd.com>
parent 1c33b6b7
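
The per-platform code removed below duplicated logic that now lives in the generic dma-direct allocator behind swiotlb_alloc(): picking a GFP zone from the device's coherent DMA mask and, when enabled, allocating from CMA. As a rough illustration of what that generic path does in this kernel series, consider the following simplified sketch. It is not code from this commit; the function name generic_dma_alloc_sketch and the exact details are illustrative only.

#include <linux/dma-mapping.h>      /* DMA_BIT_MASK() */
#include <linux/dma-direct.h>       /* phys_to_dma() */
#include <linux/dma-contiguous.h>   /* dma_alloc_from_contiguous() */
#include <linux/gfp.h>              /* alloc_pages_node(), gfpflags_allow_blocking() */

/*
 * Simplified sketch of the generic allocation path behind swiotlb_alloc()
 * (dma_direct_alloc() plus the swiotlb fallback). Illustrative only; the
 * real code differs in details and error handling.
 */
static void *generic_dma_alloc_sketch(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        /* Pick a GFP zone from the device's coherent DMA mask. */
        if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
                gfp |= GFP_DMA;
        else if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                gfp |= GFP_DMA32;

        /*
         * Try CMA first when it is configured and sleeping is allowed;
         * this is what makes the old __GFP_NORETRY hack unnecessary for
         * large allocations.
         */
        if (gfpflags_allow_blocking(gfp))
                page = dma_alloc_from_contiguous(dev, count,
                                get_order(size), gfp);
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp,
                                get_order(size));
        if (!page)
                return NULL;    /* the real swiotlb_alloc() falls back to the swiotlb pool */

        *dma_handle = phys_to_dma(dev, page_to_phys(page));
        return page_address(page);
}

Correspondingly, swiotlb_free() hands CMA or page-allocator memory back through the dma-direct path (or releases a bounce buffer), which is why the per-platform octeon_dma_free_coherent()/loongson_dma_free_coherent() wrappers can simply be dropped and .free pointed straight at swiotlb_free in the diff below.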
@@ -75,6 +75,7 @@ config NEED_SG_DMA_LENGTH
 config SWIOTLB
         def_bool y
+        select DMA_DIRECT_OPS
         select IOMMU_HELPER
         select NEED_SG_DMA_LENGTH
@@ -159,33 +159,13 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
         dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-        void *ret;
-        if (IS_ENABLED(CONFIG_ZONE_DMA) && dev == NULL)
-                gfp |= __GFP_DMA;
-        else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-                 dev->coherent_dma_mask <= DMA_BIT_MASK(24))
-                gfp |= __GFP_DMA;
-        else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-                 dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-                gfp |= __GFP_DMA32;
-        /* Don't invoke OOM killer */
-        gfp |= __GFP_NORETRY;
-        ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+        void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
         mb();
         return ret;
 }
-static void octeon_dma_free_coherent(struct device *dev, size_t size,
-        void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
-        swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
         return paddr;
@@ -225,7 +205,7 @@ EXPORT_SYMBOL(dma_to_phys);
 static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
         .dma_map_ops = {
                 .alloc = octeon_dma_alloc_coherent,
-                .free = octeon_dma_free_coherent,
+                .free = swiotlb_free,
                 .map_page = octeon_dma_map_page,
                 .unmap_page = swiotlb_unmap_page,
                 .map_sg = octeon_dma_map_sg,
@@ -311,7 +291,7 @@ void __init plat_swiotlb_setup(void)
 static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
         .dma_map_ops = {
                 .alloc = octeon_dma_alloc_coherent,
-                .free = octeon_dma_free_coherent,
+                .free = swiotlb_free,
                 .map_page = octeon_dma_map_page,
                 .unmap_page = swiotlb_unmap_page,
                 .map_sg = octeon_dma_map_sg,
@@ -136,6 +136,7 @@ config SWIOTLB
         bool "Soft IOMMU Support for All-Memory DMA"
         default y
         depends on CPU_LOONGSON3
+        select DMA_DIRECT_OPS
         select IOMMU_HELPER
         select NEED_SG_DMA_LENGTH
         select NEED_DMA_MAP_STATE
@@ -13,29 +13,12 @@
 static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
         dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-        void *ret;
+        void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
-        if ((IS_ENABLED(CONFIG_ISA) && dev == NULL) ||
-            (IS_ENABLED(CONFIG_ZONE_DMA) &&
-             dev->coherent_dma_mask < DMA_BIT_MASK(32)))
-                gfp |= __GFP_DMA;
-        else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-                 dev->coherent_dma_mask < DMA_BIT_MASK(40))
-                gfp |= __GFP_DMA32;
-        gfp |= __GFP_NORETRY;
-        ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
         mb();
         return ret;
 }
-static void loongson_dma_free_coherent(struct device *dev, size_t size,
-        void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
-        swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
 static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
         unsigned long offset, size_t size,
         enum dma_data_direction dir,
@@ -106,7 +89,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 static const struct dma_map_ops loongson_dma_map_ops = {
         .alloc = loongson_dma_alloc_coherent,
-        .free = loongson_dma_free_coherent,
+        .free = swiotlb_free,
         .map_page = loongson_dma_map_page,
         .unmap_page = swiotlb_unmap_page,
         .map_sg = loongson_dma_map_sg,