Commit 080321d3 authored by Christoph Hellwig

dma-direct: add support for CMA allocation

Try the CMA allocator for coherent allocations if supported.

Roughly modelled after the x86 code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 27975969
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/dma-direct.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-contiguous.h>
 #include <linux/pfn.h>
 
 #define DIRECT_MAPPING_ERROR		0
@@ -29,19 +30,30 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 static void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	void *ret;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	int page_order = get_order(size);
+	struct page *page = NULL;
 
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-	if (ret)
-		*dma_handle = phys_to_dma(dev, virt_to_phys(ret));
-	return ret;
+	/* CMA can be used only in the context which permits sleeping */
+	if (gfpflags_allow_blocking(gfp))
+		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+	if (!page)
+		page = alloc_pages(gfp, page_order);
+	if (!page)
+		return NULL;
+
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
+	memset(page_address(page), 0, size);
+	return page_address(page);
 }
 
 static void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
 {
-	free_pages((unsigned long)cpu_addr, get_order(size));
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
+		free_pages((unsigned long)cpu_addr, get_order(size));
 }
 
 static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
...
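
For context, a minimal, hypothetical driver sketch of how this path is exercised: a driver calls dma_alloc_coherent(), which on a platform routed through dma-direct lands in dma_direct_alloc() above and, with this patch, tries the CMA area first when the GFP flags allow sleeping. The device pointer, buffer size, and function name below are illustrative placeholders, not part of this commit.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * Hypothetical consumer: allocate a coherent DMA buffer. A GFP_KERNEL
 * allocation allows blocking, so dma_direct_alloc() will attempt
 * dma_alloc_from_contiguous() before falling back to alloc_pages().
 */
static int example_coherent_alloc(struct device *my_dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(my_dev, SZ_64K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with dma_handle, access the buffer via cpu_addr ... */

	dma_free_coherent(my_dev, SZ_64K, cpu_addr, dma_handle);
	return 0;
}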