Commit e2f466e3 authored by Lucas Stach, committed by Linus Torvalds

mm: cma_alloc: allow to specify GFP mask

Most users of this interface just want to use it with the default
GFP_KERNEL flags, but in cases where DMA memory is allocated it may be
called from a context where GFP_KERNEL is not appropriate.

No functional change yet, just passing through the flag to the
underlying alloc_contig_range function.

Link: http://lkml.kernel.org/r/20170127172328.18574-2-l.stach@pengutronix.de
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Alexander Graf <agraf@suse.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ca96b625
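
For illustration, here is how the new argument looks from a caller's point of
view. This is a minimal sketch, not part of this commit: the example_* function
names are made up, and the __GFP_NOWARN variant assumes the mask is passed
through to the underlying page allocator as the commit message describes.

#include <linux/cma.h>
#include <linux/gfp.h>

/* Equivalent to the old cma_alloc() behaviour: may block and reclaim,
 * and warns on allocation failure. */
static struct page *example_cma_alloc(struct cma *area, size_t nr_pages)
{
	return cma_alloc(area, nr_pages, 0, GFP_KERNEL);
}

/* A caller that handles failure itself can now suppress the
 * allocation-failure warning via the new gfp_mask argument. */
static struct page *example_cma_alloc_quiet(struct cma *area, size_t nr_pages)
{
	return cma_alloc(area, nr_pages, 0, GFP_KERNEL | __GFP_NOWARN);
}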
@@ -56,7 +56,8 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
 {
 	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
-	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
+	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
+			 GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
 
@@ -193,7 +193,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
 	if (align > CONFIG_CMA_ALIGNMENT)
 		align = CONFIG_CMA_ALIGNMENT;
 
-	return cma_alloc(dev_get_cma_area(dev), count, align);
+	return cma_alloc(dev_get_cma_area(dev), count, align, GFP_KERNEL);
 }
 
 /**
@@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
 extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					unsigned int order_per_bit,
 					struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
+extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+			      gfp_t gfp_mask);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
 #endif
@@ -357,7 +357,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
  * This function allocates part of contiguous memory on specific
  * contiguous memory area.
  */
-struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+		       gfp_t gfp_mask)
 {
 	unsigned long mask, offset;
 	unsigned long pfn = -1;
@@ -403,7 +404,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
-					 GFP_KERNEL);
+					 gfp_mask);
 		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
@@ -138,7 +138,7 @@ static int cma_alloc_mem(struct cma *cma, int count)
 	if (!mem)
 		return -ENOMEM;
 
-	p = cma_alloc(cma, count, 0);
+	p = cma_alloc(cma, count, 0, GFP_KERNEL);
 	if (!p) {
 		kfree(mem);
 		return -ENOMEM;