Commit 3162bbd7 authored by Joonsoo Kim, committed by Linus Torvalds

DMA, CMA: separate core CMA management codes from DMA APIs

To prepare for future generalization work on the CMA area management code, we
need to separate the core CMA management code from the DMA APIs.  In the
following patches we will extend these core functions to cover the
requirements of PPC KVM's CMA area management.  This separation lets us
extend the core functions without touching the DMA APIs.
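
(Editor's illustration, not part of the patch: a minimal, self-contained sketch of the split this series applies. The generic logic lives in a static double-underscore core helper, and the exported DMA-facing function stays a thin wrapper around it. All names below are invented placeholders for the example, not the kernel's symbols.)

/*
 * Illustrative sketch only (not kernel code): the refactoring pattern used
 * by this patch.  The core helper takes a plain CMA descriptor and knows
 * nothing about devices; the public wrapper resolves the per-device area
 * and delegates.
 */
#include <stdbool.h>
#include <stdio.h>

struct cma_stub {
	unsigned long base_pfn;	/* first page frame of the reserved area */
	unsigned long count;	/* number of pages in the area */
};

/* Core function: pure CMA bookkeeping, no notion of a device. */
static bool __cma_stub_release(struct cma_stub *cma, unsigned long pfn, int count)
{
	if (!cma || count <= 0)
		return false;
	/* real code would clear the allocation bitmap and free the pages */
	return pfn >= cma->base_pfn &&
	       pfn + (unsigned long)count <= cma->base_pfn + cma->count;
}

/* DMA-facing wrapper: takes "the device's area" and delegates to the core. */
static bool dma_stub_release(struct cma_stub *dev_area, unsigned long pfn, int count)
{
	return __cma_stub_release(dev_area, pfn, count);
}

int main(void)
{
	struct cma_stub area = { .base_pfn = 0x1000, .count = 64 };

	/* pages inside the area release successfully, others do not */
	printf("inside:  %d\n", dma_stub_release(&area, 0x1000, 16));
	printf("outside: %d\n", dma_stub_release(&area, 0x2000, 16));
	return 0;
}
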
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bc7f84c0
@@ -213,26 +213,9 @@ static int __init cma_init_reserved_areas(void)
 }
 core_initcall(cma_init_reserved_areas);
 
-/**
- * dma_contiguous_reserve_area() - reserve custom contiguous area
- * @size: Size of the reserved area (in bytes),
- * @base: Base address of the reserved area optional, use 0 for any
- * @limit: End address of the reserved memory (optional, 0 for any).
- * @res_cma: Pointer to store the created cma region.
- * @fixed: hint about where to place the reserved area
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory. This function allows to create custom reserved areas for specific
- * devices.
- *
- * If @fixed is true, reserve contiguous area at exactly @base. If false,
- * reserve in range from @base to @limit.
- */
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma,
-				       bool fixed)
+static int __init __dma_contiguous_reserve_area(phys_addr_t size,
+				phys_addr_t base, phys_addr_t limit,
+				struct cma **res_cma, bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
@@ -286,15 +269,47 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
 
-	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(base, size);
 	return 0;
 err:
 	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return ret;
 }
 
+/**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes),
+ * @base: Base address of the reserved area optional, use 0 for any
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas for specific
+ * devices.
+ *
+ * If @fixed is true, reserve contiguous area at exactly @base. If false,
+ * reserve in range from @base to @limit.
+ */
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed)
+{
+	int ret;
+
+	ret = __dma_contiguous_reserve_area(size, base, limit, res_cma, fixed);
+	if (ret)
+		return ret;
+
+	/* Architecture specific contiguous memory fixup. */
+	dma_contiguous_early_fixup(PFN_PHYS((*res_cma)->base_pfn),
+				(*res_cma)->count << PAGE_SHIFT);
+
+	return 0;
+}
+
 static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 {
 	mutex_lock(&cma->lock);
@@ -302,31 +317,16 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
 	mutex_unlock(&cma->lock);
 }
 
-/**
- * dma_alloc_from_contiguous() - allocate pages from contiguous area
- * @dev: Pointer to device for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- *
- * This function allocates memory buffer for specified device. It uses
- * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific dev_get_cma_area() helper
- * function.
- */
-struct page *dma_alloc_from_contiguous(struct device *dev, int count,
-				       unsigned int align)
+static struct page *__dma_alloc_from_contiguous(struct cma *cma, int count,
+				       unsigned int align)
 {
 	unsigned long mask, pfn, pageno, start = 0;
-	struct cma *cma = dev_get_cma_area(dev);
 	struct page *page = NULL;
 	int ret;
 
 	if (!cma || !cma->count)
 		return NULL;
 
-	if (align > CONFIG_CMA_ALIGNMENT)
-		align = CONFIG_CMA_ALIGNMENT;
-
 	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
 		 count, align);
@@ -375,19 +375,30 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 }
 
 /**
- * dma_release_from_contiguous() - release allocated pages
- * @dev: Pointer to device for which the pages were allocated.
- * @pages: Allocated pages.
- * @count: Number of allocated pages.
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev: Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
  *
- * This function releases memory allocated by dma_alloc_from_contiguous().
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
+ * This function allocates memory buffer for specified device. It uses
+ * device specific contiguous memory area if available or the default
+ * global one. Requires architecture specific dev_get_cma_area() helper
+ * function.
  */
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
-				 int count)
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+				       unsigned int align)
 {
 	struct cma *cma = dev_get_cma_area(dev);
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
+
+	return __dma_alloc_from_contiguous(cma, count, align);
+}
+
+static bool __dma_release_from_contiguous(struct cma *cma, struct page *pages,
+				 int count)
+{
 	unsigned long pfn;
 
 	if (!cma || !pages)
@@ -407,3 +418,21 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 	return true;
 }
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+				 int count)
+{
+	struct cma *cma = dev_get_cma_area(dev);
+
+	return __dma_release_from_contiguous(cma, pages, count);
+}