Commit 81e9d894 authored by Nicolas Saenz Julienne, committed by Christoph Hellwig

dma-pool: make sure atomic pool suits device

When allocating DMA memory from a pool, the core can only guess which
atomic pool will fit a device's constraints. If it doesn't, get a safer
atomic pool and try again.

Fixes: c84dc6e6 ("dma-pool: add additional coherent pools to map to gfp mask")
Reported-by: Jeremy Linton <jeremy.linton@arm.com>
Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 48b67038
@@ -240,39 +240,56 @@ static inline struct gen_pool *dma_guess_pool(struct device *dev,
/**
 * dma_alloc_from_pool - allocate zeroed atomic DMA memory for a device
 * @dev:      device the memory must be addressable by
 * @size:     number of bytes to allocate
 * @ret_page: out parameter; set to the page backing the allocation on success
 * @flags:    gfp mask, used only for the failure diagnostic
 *
 * Walks the atomic pools starting from dma_guess_pool()'s best guess until
 * an allocation satisfies dma_coherent_ok() for @dev.  The returned memory
 * is zeroed.  Returns the kernel virtual address on success, NULL on failure.
 */
void *dma_alloc_from_pool(struct device *dev, size_t size,
			  struct page **ret_page, gfp_t flags)
{
	struct gen_pool *pool = NULL;
	unsigned long val = 0;
	void *ptr = NULL;
	phys_addr_t phys;

	/*
	 * The core can only guess which atomic pool fits the device's
	 * constraints; if the guess is wrong, move on to a safer pool
	 * and try again.
	 */
	while (1) {
		pool = dma_guess_pool(dev, pool);
		if (!pool) {
			WARN(1, "Failed to get suitable pool for %s\n",
			     dev_name(dev));
			break;
		}

		val = gen_pool_alloc(pool, size);
		if (!val)
			continue;	/* pool exhausted, try the next one */

		phys = gen_pool_virt_to_phys(pool, val);
		if (dma_coherent_ok(dev, phys, size))
			break;

		/* Memory not addressable by this device; put it back. */
		gen_pool_free(pool, val, size);
		val = 0;
	}

	if (val) {
		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);

		/* Replenish the pool asynchronously when it runs low. */
		if (gen_pool_avail(pool) < atomic_pool_size)
			schedule_work(&atomic_pool_work);
	}

	return ptr;
}
/**
 * dma_free_from_pool - return memory previously handed out by an atomic pool
 * @dev:   device the memory was allocated for
 * @start: kernel virtual address of the allocation
 * @size:  size of the allocation in bytes
 *
 * The allocation may live in any of the atomic pools (dma_alloc_from_pool()
 * may have fallen back to a safer pool than the first guess), so probe each
 * candidate pool in turn.  Returns true if the address belonged to a pool
 * and was freed, false if no pool claims it.
 */
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while (1) {
		pool = dma_guess_pool(dev, pool);
		if (!pool)
			return false;

		if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
			gen_pool_free(pool, (unsigned long)start, size);
			return true;
		}
	}
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment