Commit 8c18fc63 authored by Linus Torvalds

Merge tag 'dma-mapping-5.8-6' of git://git.infradead.org/users/hch/dma-mapping into master

Pull dma-mapping fixes from Christoph Hellwig:
 "Ensure we always have fully addressable memory in the dma coherent
  pool (Nicolas Saenz Julienne)"

* tag 'dma-mapping-5.8-6' of git://git.infradead.org/users/hch/dma-mapping:
  dma-pool: do not allocate pool memory from CMA
  dma-pool: make sure atomic pool suits device
  dma-pool: introduce dma_guess_pool()
  dma-pool: get rid of dma_in_atomic_pool()
  dma-direct: provide function to check physical memory area validity
parents f932d58a d9765e41
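
The common thread in the series: memory handed out from the atomic coherent pools must fall within min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit), and allocation now retries across pools until dma_coherent_ok() confirms that. Below is a minimal standalone sketch of that addressability test, assuming an identity physical-to-DMA mapping (the kernel goes through phys_to_dma_direct() instead):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors min_not_zero(): prefer the smaller of two limits, skipping zero. */
static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
        if (a == 0)
                return b;
        if (b == 0)
                return a;
        return a < b ? a : b;
}

/* Sketch of dma_coherent_ok(), assuming dma_addr == phys (no offset). */
static bool coherent_ok(uint64_t coherent_dma_mask, uint64_t bus_dma_limit,
                        uint64_t phys, uint64_t size)
{
        return phys + size - 1 <= min_not_zero(coherent_dma_mask, bus_dma_limit);
}

int main(void)
{
        /* A 30-bit device cannot use a buffer that ends above 1 GiB. */
        printf("%d\n", coherent_ok((1ULL << 30) - 1, 0, 0x40000000, 4096)); /* 0 */
        printf("%d\n", coherent_ok((1ULL << 30) - 1, 0, 0x3FFFE000, 4096)); /* 1 */
        return 0;
}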
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -69,6 +69,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 u64 dma_direct_get_required_mask(struct device *dev);
 gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_mask);
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -70,7 +70,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
        return 0;
 }
 
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
        return phys_to_dma_direct(dev, phys) + size - 1 <=
                        min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
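
Dropping the static qualifier, together with the declaration added to dma-direct.h above, lets kernel/dma/pool.c call dma_coherent_ok() to validate pool memory against a device's mask before handing it out.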
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -6,7 +6,6 @@
 #include <linux/debugfs.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
 #include <linux/init.h>
 #include <linux/genalloc.h>
 #include <linux/set_memory.h>
@@ -69,11 +68,6 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
        do {
                pool_size = 1 << (PAGE_SHIFT + order);
-               if (dev_get_cma_area(NULL))
-                       page = dma_alloc_from_contiguous(NULL, 1 << order,
-                                                        order, false);
-               else
-                       page = alloc_pages(gfp, order);
+               page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);
        if (!page)
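
The CMA path is removed because the default CMA area can sit anywhere in physical memory, so pages taken from it may land outside the zone the pool was created for; plain alloc_pages() with the pool's gfp mask keeps allocations inside the intended zone (GFP_DMA or GFP_DMA32).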
@@ -118,7 +112,6 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
        dma_common_free_remap(addr, pool_size);
 #endif
 free_page: __maybe_unused
-       if (!dma_release_from_contiguous(NULL, page, 1 << order))
-               __free_pages(page, order);
+       __free_pages(page, order);
 out:
        return ret;
@@ -203,7 +196,7 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-static inline struct gen_pool *dev_to_pool(struct device *dev)
+static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
 {
        u64 phys_mask;
        gfp_t gfp;
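
The rename reflects the helper's new role: it now provides only a first guess, which dma_alloc_from_pool() below may discard if the memory fails dma_coherent_ok(). The guess keys off the gfp zone that dma_direct_optimal_gfp_mask() derives from the device's coherent mask; a simplified sketch, with the zone boundaries hard-coded purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Sketch: which atomic pool a device's coherent mask would select first.
 * Zone boundaries are illustrative (24-bit ZONE_DMA, 32-bit ZONE_DMA32);
 * the kernel derives this via dma_direct_optimal_gfp_mask(). */
static const char *guess_pool(uint64_t coherent_dma_mask)
{
        if (coherent_dma_mask <= (1ULL << 24) - 1)
                return "atomic_pool_dma";
        if (coherent_dma_mask <= (1ULL << 32) - 1)
                return "atomic_pool_dma32";
        return "atomic_pool_kernel";
}

int main(void)
{
        printf("%s\n", guess_pool((1ULL << 30) - 1)); /* atomic_pool_dma32 */
        printf("%s\n", guess_pool(~0ULL));            /* atomic_pool_kernel */
        return 0;
}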
@@ -217,51 +210,79 @@ static inline struct gen_pool *dev_to_pool(struct device *dev)
        return atomic_pool_kernel;
 }
 
-static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size)
+static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
 {
-       struct gen_pool *pool = dev_to_pool(dev);
+       if (bad_pool == atomic_pool_kernel)
+               return atomic_pool_dma32 ? : atomic_pool_dma;
 
-       if (unlikely(!pool))
-               return false;
-       return gen_pool_has_addr(pool, (unsigned long)start, size);
+       if (bad_pool == atomic_pool_dma32)
+               return atomic_pool_dma;
+
+       return NULL;
+}
+
+static inline struct gen_pool *dma_guess_pool(struct device *dev,
+                                             struct gen_pool *bad_pool)
+{
+       if (bad_pool)
+               return dma_get_safer_pool(bad_pool);
+
+       return dma_guess_pool_from_device(dev);
 }
 
 void *dma_alloc_from_pool(struct device *dev, size_t size,
                          struct page **ret_page, gfp_t flags)
 {
-       struct gen_pool *pool = dev_to_pool(dev);
-       unsigned long val;
+       struct gen_pool *pool = NULL;
+       unsigned long val = 0;
        void *ptr = NULL;
+       phys_addr_t phys;
 
-       if (!pool) {
-               WARN(1, "%pGg atomic pool not initialised!\n", &flags);
-               return NULL;
-       }
+       while (1) {
+               pool = dma_guess_pool(dev, pool);
+               if (!pool) {
+                       WARN(1, "Failed to get suitable pool for %s\n",
+                            dev_name(dev));
+                       break;
+               }
+
+               val = gen_pool_alloc(pool, size);
+               if (!val)
+                       continue;
+
+               phys = gen_pool_virt_to_phys(pool, val);
+               if (dma_coherent_ok(dev, phys, size))
+                       break;
+
+               gen_pool_free(pool, val, size);
+               val = 0;
+       }
 
-       val = gen_pool_alloc(pool, size);
-       if (likely(val)) {
-               phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
+       if (val) {
                *ret_page = pfn_to_page(__phys_to_pfn(phys));
                ptr = (void *)val;
                memset(ptr, 0, size);
-       } else {
-               WARN_ONCE(1, "DMA coherent pool depleted, increase size "
-                         "(recommended min coherent_pool=%zuK)\n",
-                         gen_pool_size(pool) >> 9);
+
+               if (gen_pool_avail(pool) < atomic_pool_size)
+                       schedule_work(&atomic_pool_work);
        }
-       if (gen_pool_avail(pool) < atomic_pool_size)
-               schedule_work(&atomic_pool_work);
 
        return ptr;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
-       struct gen_pool *pool = dev_to_pool(dev);
+       struct gen_pool *pool = NULL;
 
-       if (!dma_in_atomic_pool(dev, start, size))
-               return false;
-       gen_pool_free(pool, (unsigned long)start, size);
-       return true;
+       while (1) {
+               pool = dma_guess_pool(dev, pool);
+               if (!pool)
+                       return false;
+
+               if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
+                       gen_pool_free(pool, (unsigned long)start, size);
+                       return true;
+               }
+       }
 }
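
Freeing likewise stops assuming that the device's current first-guess pool is the one the buffer came from: since allocation may have fallen back to a different pool, the free path probes each candidate with gen_pool_has_addr(). The same walk, reduced to a toy model with one contiguous range per pool:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for a gen_pool: one contiguous range per pool. */
struct pool {
        uintptr_t base;
        size_t len;
};

static bool pool_has_addr(const struct pool *p, uintptr_t start, size_t size)
{
        return p && start >= p->base && start + size <= p->base + p->len;
}

/* Skeleton of the dma_free_from_pool() walk: probe each candidate pool
 * and free from whichever actually owns the address. */
static bool free_from_pools(const struct pool *const *candidates, size_t n,
                            uintptr_t start, size_t size)
{
        for (size_t i = 0; i < n; i++) {
                if (pool_has_addr(candidates[i], start, size)) {
                        /* gen_pool_free() would go here */
                        return true;
                }
        }
        return false; /* not pool memory */
}

int main(void)
{
        struct pool dma32 = { 0x1000, 0x1000 }, kernel = { 0x100000, 0x1000 };
        const struct pool *pools[] = { &kernel, &dma32 };

        printf("%d\n", free_from_pools(pools, 2, 0x1800, 64)); /* 1: in dma32 */
        printf("%d\n", free_from_pools(pools, 2, 0x9000, 64)); /* 0: in neither */
        return 0;
}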