Commit 0a2b9a6e authored by Marek Szyprowski

X86: integrate CMA with DMA-mapping subsystem

This patch adds CMA support to the dma-mapping subsystem for the x86
architecture, which uses the common pci-dma/pci-nommu implementation. This
makes it possible to test CMA on KVM/QEMU and on a lot of common x86 boxes.
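Usage note (illustration only, not part of this patch): on an x86 kernel built
without SWIOTLB, a driver's ordinary dma_alloc_coherent()/dma_free_coherent()
calls are served by dma_generic_alloc_coherent()/dma_generic_free_coherent(),
so after this patch such buffers can be backed by the CMA area. A minimal,
hypothetical driver-side sketch (the device pointer, buffer size and helper
names below are made up):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define EXAMPLE_BUF_SIZE	(1024 * 1024)	/* 1 MiB, arbitrary */

static void *example_buf;
static dma_addr_t example_dma;

/* Non-atomic request: tried against the CMA area first, with a
 * fallback to the normal page allocator. */
static int example_alloc(struct device *dev)
{
	example_buf = dma_alloc_coherent(dev, EXAMPLE_BUF_SIZE,
					 &example_dma, GFP_KERNEL);
	return example_buf ? 0 : -ENOMEM;
}

/* dma_generic_free_coherent() returns CMA-backed pages to the
 * contiguous area and only falls back to free_pages() otherwise. */
static void example_free(struct device *dev)
{
	dma_free_coherent(dev, EXAMPLE_BUF_SIZE, example_buf, example_dma);
}

GFP_ATOMIC allocations still bypass CMA and go straight to alloc_pages_node(),
as the pci-dma hunk below shows.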
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
parent c64be2bb
arch/x86/Kconfig
@@ -31,6 +31,7 @@ config X86
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_FRAME_POINTERS
 	select HAVE_DMA_ATTRS
+	select HAVE_DMA_CONTIGUOUS if !SWIOTLB
 	select HAVE_KRETPROBES
 	select HAVE_OPTPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD

arch/x86/include/asm/dma-contiguous.h (new file)

#ifndef ASMX86_DMA_CONTIGUOUS_H
#define ASMX86_DMA_CONTIGUOUS_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <asm-generic/dma-contiguous.h>

static inline void
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }

#endif
#endif

arch/x86/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
 #include <asm/io.h>
 #include <asm/swiotlb.h>
 #include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>

 #ifdef CONFIG_ISA
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,6 +63,10 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag,
 					struct dma_attrs *attrs);

+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs);
+
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)

arch/x86/kernel/pci-dma.c
@@ -100,14 +100,18 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
-	struct page *page;
+	struct page *page = NULL;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t addr;

 	dma_mask = dma_alloc_coherent_mask(dev, flag);

 	flag |= __GFP_ZERO;
 again:
-	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+	if (!(flag & GFP_ATOMIC))
+		page = dma_alloc_from_contiguous(dev, count, get_order(size));
+	if (!page)
+		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
 	if (!page)
 		return NULL;

@@ -127,6 +131,16 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 	return page_address(page);
 }

+void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_addr, struct dma_attrs *attrs)
+{
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = virt_to_page(vaddr);
+
+	if (!dma_release_from_contiguous(dev, page, count))
+		free_pages((unsigned long)vaddr, get_order(size));
+}
+
 /*
  * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
  * parameter documentation.

arch/x86/kernel/pci-nommu.c
@@ -74,12 +74,6 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 	return nents;
 }

-static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_addr, struct dma_attrs *attrs)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
 static void nommu_sync_single_for_device(struct device *dev,
 			dma_addr_t addr, size_t size,
 			enum dma_data_direction dir)
@@ -97,7 +91,7 @@ static void nommu_sync_sg_for_device(struct device *dev,

 struct dma_map_ops nommu_dma_ops = {
 	.alloc			= dma_generic_alloc_coherent,
-	.free			= nommu_free_coherent,
+	.free			= dma_generic_free_coherent,
 	.map_sg			= nommu_map_sg,
 	.map_page		= nommu_map_page,
 	.sync_single_for_device = nommu_sync_single_for_device,

arch/x86/kernel/setup.c
@@ -50,6 +50,7 @@
 #include <asm/pci-direct.h>
 #include <linux/init_ohci1394_dma.h>
 #include <linux/kvm_para.h>
+#include <linux/dma-contiguous.h>

 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -934,6 +935,7 @@ void __init setup_arch(char **cmdline_p)
 	}
 #endif
 	memblock.current_limit = get_max_mapped();
+	dma_contiguous_reserve(0);

 	/*
 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.