Commit e2b745f4 authored by Linus Torvalds

Merge tag 'dma-mapping-4.21-1' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "Fix various regressions introduced in this cycles:

   - fix dma-debug tracking for the map_page / map_single
     consolidation

   - properly stub out DMA mapping symbols for !HAS_DMA builds to avoid
     link failures

   - fix AMD GART direct mappings

   - set up the DMA address for no-kernel-mapping allocations from the
     remap allocator"

* tag 'dma-mapping-4.21-1' of git://git.infradead.org/users/hch/dma-mapping:
  dma-direct: fix DMA_ATTR_NO_KERNEL_MAPPING for remapped allocations
  x86/amd_gart: fix unmapping of non-GART mappings
  dma-mapping: remove a few unused exports
  dma-mapping: properly stub out the DMA API for !CONFIG_HAS_DMA
  dma-mapping: remove dmam_{declare,release}_coherent_memory
  dma-mapping: implement dmam_alloc_coherent using dmam_alloc_attrs
  dma-mapping: implement dma_map_single_attrs using dma_map_page_attrs
parents 12133258 8270f3a1
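
Two of the shortlog entries above are easiest to see from the header side. Per "dma-mapping: implement dma_map_single_attrs using dma_map_page_attrs", dma_map_single() is now a thin wrapper over the page-based mapping path. That wrapper lives in include/linux/dma-mapping.h, which this page does not show, so the snippet below is a hedged reconstruction from the mainline tree rather than a quote of this diff:

	/*
	 * Reconstruction of the consolidated wrappers (assumed shape; the
	 * authoritative version is in include/linux/dma-mapping.h).
	 */
	static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
			size_t size, enum dma_data_direction dir, unsigned long attrs)
	{
		/* dma-debug still gets a chance to check for on-stack buffers */
		debug_dma_map_single(dev, ptr, size);
		return dma_map_page_attrs(dev, virt_to_page(ptr),
				offset_in_page(ptr), size, dir, attrs);
	}

	static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
			size_t size, enum dma_data_direction dir, unsigned long attrs)
	{
		dma_unmap_page_attrs(dev, addr, size, dir, attrs);
	}

This consolidation is what drives the dma-debug changes further down: once both entry points funnel into one implementation, the debug code can no longer keep separate map_single/map_page bookkeeping.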
@@ -250,7 +250,6 @@ DMA
   dmaenginem_async_device_register()
   dmam_alloc_coherent()
   dmam_alloc_attrs()
-  dmam_declare_coherent_memory()
   dmam_free_coherent()
   dmam_pool_create()
   dmam_pool_destroy()
...
@@ -256,7 +256,15 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	int npages;
 	int i;
 
-	if (dma_addr == DMA_MAPPING_ERROR ||
+	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
+		return;
+
+	/*
+	 * This driver will not always use a GART mapping, but might have
+	 * created a direct mapping instead. If that is the case there is
+	 * nothing to unmap here.
+	 */
+	if (dma_addr < iommu_bus_base ||
 	    dma_addr >= iommu_bus_base + iommu_size)
 		return;
...
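
For context on why the unmap path now range-checks: the GART driver's map side can legitimately hand back a direct (non-remapped) address when the device can already reach the buffer. A simplified, hedged sketch of that dispatch (modeled on gart_map_page()/need_iommu() in amd_gart_64.c; signatures are trimmed, and dma_map_area_sketch() is a hypothetical stand-in for the real GART slot allocator):

	/*
	 * Simplified sketch, not a verbatim quote of the driver: buffers the
	 * device can address directly are passed through untouched, so the
	 * returned handle falls outside [iommu_bus_base, iommu_bus_base +
	 * iommu_size) and must not be treated as a GART mapping at unmap time.
	 */
	static dma_addr_t gart_map_sketch(struct device *dev, phys_addr_t paddr,
			size_t size)
	{
		if (!need_iommu(dev, paddr, size))
			return paddr;			/* direct mapping, no GART slot */

		return dma_map_area_sketch(dev, paddr, size);	/* GART-remapped */
	}

The new lower-bound check in gart_unmap_page() is the unmap-side counterpart of this test.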
@@ -35,13 +35,12 @@ extern void debug_dma_map_single(struct device *dev, const void *addr,
 
 extern void debug_dma_map_page(struct device *dev, struct page *page,
 			       size_t offset, size_t size,
-			       int direction, dma_addr_t dma_addr,
-			       bool map_single);
+			       int direction, dma_addr_t dma_addr);
 
 extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-				 size_t size, int direction, bool map_single);
+				 size_t size, int direction);
 
 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 			     int nents, int mapped_ents, int direction);
@@ -95,8 +94,7 @@ static inline void debug_dma_map_single(struct device *dev, const void *addr,
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
-				      int direction, dma_addr_t dma_addr,
-				      bool map_single)
+				      int direction, dma_addr_t dma_addr)
 {
 }
@@ -106,8 +104,7 @@ static inline void debug_dma_mapping_error(struct device *dev,
 }
 
 static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-					size_t size, int direction,
-					bool map_single)
+					size_t size, int direction)
 {
 }
...
This diff is collapsed.
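
The collapsed diff is, judging by the shortlog entry "dma-mapping: properly stub out the DMA API for !CONFIG_HAS_DMA", most likely the large include/linux/dma-mapping.h rework (an assumption; this page does not expand it). The pattern it introduces is a set of inline stubs that let HAS_DMA=n configurations compile and link while failing every operation cleanly, roughly:

	#else /* !CONFIG_HAS_DMA */
	/*
	 * Representative stubs (illustration only, not quoted from this page):
	 * callers build and link, but every mapping attempt reports failure.
	 */
	static inline dma_addr_t dma_map_page_attrs(struct device *dev,
			struct page *page, size_t offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		return DMA_MAPPING_ERROR;
	}

	static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		return -ENOMEM;
	}
	#endif /* CONFIG_HAS_DMA */

Previously, code buildable with HAS_DMA=n could hit undefined references at link time because the symbols only existed under CONFIG_HAS_DMA.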
@@ -223,7 +223,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
 void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
@@ -268,7 +267,6 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 	return __dma_release_from_coherent(mem, order, vaddr);
 }
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
 
 int dma_release_from_global_coherent(int order, void *vaddr)
 {
...
@@ -49,7 +49,6 @@
 enum {
 	dma_debug_single,
-	dma_debug_page,
 	dma_debug_sg,
 	dma_debug_coherent,
 	dma_debug_resource,
@@ -1300,8 +1299,7 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 EXPORT_SYMBOL(debug_dma_map_single);
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-			size_t size, int direction, dma_addr_t dma_addr,
-			bool map_single)
+			size_t size, int direction, dma_addr_t dma_addr)
 {
 	struct dma_debug_entry *entry;
@@ -1316,7 +1314,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 		return;
 
 	entry->dev       = dev;
-	entry->type      = dma_debug_page;
+	entry->type      = dma_debug_single;
 	entry->pfn       = page_to_pfn(page);
 	entry->offset    = offset,
 	entry->dev_addr  = dma_addr;
@@ -1324,9 +1322,6 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->direction = direction;
 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
 
-	if (map_single)
-		entry->type = dma_debug_single;
-
 	check_for_stack(dev, page, offset);
 
 	if (!PageHighMem(page)) {
@@ -1378,10 +1373,10 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 EXPORT_SYMBOL(debug_dma_mapping_error);
 
 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-			  size_t size, int direction, bool map_single)
+			  size_t size, int direction)
 {
 	struct dma_debug_entry ref = {
-		.type           = dma_debug_page,
+		.type           = dma_debug_single,
 		.dev            = dev,
 		.dev_addr       = addr,
 		.size           = size,
@@ -1390,10 +1385,6 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 	if (unlikely(dma_debug_disabled()))
 		return;
 
-	if (map_single)
-		ref.type = dma_debug_single;
-
 	check_unmap(&ref);
 }
 EXPORT_SYMBOL(debug_dma_unmap_page);
@@ -1521,7 +1512,6 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 	add_dma_entry(entry);
 }
-EXPORT_SYMBOL(debug_dma_alloc_coherent);
 
 void debug_dma_free_coherent(struct device *dev, size_t size,
 			void *virt, dma_addr_t addr)
@@ -1549,7 +1539,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 	check_unmap(&ref);
 }
-EXPORT_SYMBOL(debug_dma_free_coherent);
 
 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
 			    int direction, dma_addr_t dma_addr)
...
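
The practical effect of folding dma_debug_page into dma_debug_single: now that dma_map_single() is implemented via dma_map_page(), dma-debug can no longer tell the two apart at unmap time, so a mixed pair like the following hypothetical driver fragment must not trip a false API-mismatch warning:

	/* Hypothetical fragment; 'dev', 'buf' and 'len' are assumed. */
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... device performs the transfer ... */

	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);

With two separate entry types, check_unmap() could flag such a pair as unmapping with the wrong function, even though both calls now reach exactly the same implementation.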
@@ -45,45 +45,6 @@ static int dmam_match(struct device *dev, void *res, void *match_data)
 	return 0;
 }
 
-/**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent(). Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_coherent(struct device *dev, size_t size,
-			  dma_addr_t *dma_handle, gfp_t gfp)
-{
-	struct dma_devres *dr;
-	void *vaddr;
-
-	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-	if (!dr)
-		return NULL;
-
-	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
-	if (!vaddr) {
-		devres_free(dr);
-		return NULL;
-	}
-
-	dr->vaddr = vaddr;
-	dr->dma_handle = *dma_handle;
-	dr->size = size;
-
-	devres_add(dev, dr);
-
-	return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);
-
 /**
  * dmam_free_coherent - Managed dma_free_coherent()
  * @dev: Device to free coherent memory for
@@ -144,61 +105,6 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 }
 EXPORT_SYMBOL(dmam_alloc_attrs);
 
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
-{
-	dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @phys_addr: Physical address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-				 dma_addr_t device_addr, size_t size, int flags)
-{
-	void *res;
-	int rc;
-
-	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
-					 flags);
-	if (!rc)
-		devres_add(dev, res);
-	else
-		devres_free(res);
-
-	return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
-	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
-#endif
-
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
...
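
The open-coded dmam_alloc_coherent() removed above is not gone; per the shortlog it is re-implemented on top of dmam_alloc_attrs() in the header (part of the diff not shown here). A hedged reconstruction from mainline:

	/*
	 * Assumed shape of the replacement wrapper in
	 * include/linux/dma-mapping.h; mapping __GFP_NOWARN to
	 * DMA_ATTR_NO_WARN preserves the old allocation-warning behaviour.
	 */
	static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp)
	{
		return dmam_alloc_attrs(dev, size, dma_handle, gfp,
				(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
	}

The dmam_declare_coherent_memory()/dmam_release_declared_memory() pair, by contrast, is removed outright: it had no remaining users, which is also why its entry disappears from devres.txt in the first hunk.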
@@ -204,8 +204,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		ret = dma_alloc_from_pool(size, &page, flags);
 		if (!ret)
 			return NULL;
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return ret;
+		goto done;
 	}
 
 	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
@@ -215,8 +214,10 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	/* remove any dirty cache lines on the kernel alias */
 	arch_dma_prep_coherent(page, size);
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-		return page; /* opaque cookie */
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		ret = page; /* opaque cookie */
+		goto done;
+	}
 
 	/* create a coherent mapping */
 	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
@@ -227,9 +228,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		return ret;
 	}
 
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	memset(ret, 0, size);
-
+done:
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
 }
...
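
From the caller's perspective, the bug this last hunk fixes: with DMA_ATTR_NO_KERNEL_MAPPING the remap allocator returns an opaque cookie (the struct page) instead of a kernel virtual address, and the old early return on that path skipped the *dma_handle assignment, leaving the caller with an uninitialized bus address. A hypothetical caller:

	/* Hypothetical usage; 'dev' and 'size' are assumed. */
	dma_addr_t dma;
	void *cookie = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
				       DMA_ATTR_NO_KERNEL_MAPPING);
	if (!cookie)
		return -ENOMEM;

	/* 'dma' is programmed into the device; 'cookie' is never dereferenced. */

	dma_free_attrs(dev, size, cookie, dma, DMA_ATTR_NO_KERNEL_MAPPING);

Routing every success path through the shared done: label guarantees *dma_handle is set exactly once.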