Commit 0f51596b authored by Marek Szyprowski

Merge branch 'for-next-arm-dma' into for-linus

Conflicts:
	arch/arm/Kconfig
	arch/arm/mm/dma-mapping.c
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
parents 61f6c7a4 4ce63fcd
arch/arm/Kconfig:

@@ -4,6 +4,7 @@ config ARM
         select HAVE_AOUT
         select HAVE_DMA_API_DEBUG
         select HAVE_IDE if PCI || ISA || PCMCIA
+        select HAVE_DMA_ATTRS
         select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
         select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
         select HAVE_MEMBLOCK
@@ -47,6 +48,14 @@ config ARM
 config ARM_HAS_SG_CHAIN
         bool
 
+config NEED_SG_DMA_LENGTH
+        bool
+
+config ARM_DMA_USE_IOMMU
+        select NEED_SG_DMA_LENGTH
+        select ARM_HAS_SG_CHAIN
+        bool
+
 config HAVE_PWM
         bool
arch/arm/common/dmabounce.c:

@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
         read_lock_irqsave(&device_info->lock, flags);
 
         list_for_each_entry(b, &device_info->safe_buffers, node)
-                if (b->safe_dma_addr == safe_dma_addr) {
+                if (b->safe_dma_addr <= safe_dma_addr &&
+                    b->safe_dma_addr + b->size > safe_dma_addr) {
                         rb = b;
                         break;
                 }
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
         if (buf == NULL) {
                 dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                         __func__, ptr);
-                return ~0;
+                return DMA_ERROR_CODE;
         }
 
         dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-                unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+                unsigned long offset, size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
 {
         dma_addr_t dma_addr;
         int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
         ret = needs_bounce(dev, dma_addr, size);
         if (ret < 0)
-                return ~0;
+                return DMA_ERROR_CODE;
 
         if (ret == 0) {
-                __dma_page_cpu_to_dev(page, offset, size, dir);
+                arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
                 return dma_addr;
         }
 
         if (PageHighMem(page)) {
                 dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-                return ~0;
+                return DMA_ERROR_CODE;
         }
 
         return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-                enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+                enum dma_data_direction dir, struct dma_attrs *attrs)
 {
         struct safe_buffer *buf;
 
@@ -352,19 +353,18 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
         buf = find_safe_buffer_dev(dev, dma_addr, __func__);
         if (!buf) {
-                __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-                        dma_addr & ~PAGE_MASK, size, dir);
+                arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
                 return;
         }
 
         unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-                unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+                size_t sz, enum dma_data_direction dir)
 {
         struct safe_buffer *buf;
+        unsigned long off;
 
         dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                 __func__, addr, off, sz, dir);
@@ -373,6 +373,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
         if (!buf)
                 return 1;
 
+        off = addr - buf->safe_dma_addr;
+
         BUG_ON(buf->direction != dir);
 
         dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
         }
         return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-                unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+                return;
+
+        arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+                size_t sz, enum dma_data_direction dir)
 {
         struct safe_buffer *buf;
+        unsigned long off;
 
         dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
                 __func__, addr, off, sz, dir);
@@ -402,6 +413,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
         if (!buf)
                 return 1;
 
+        off = addr - buf->safe_dma_addr;
+
         BUG_ON(buf->direction != dir);
 
         dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
         }
         return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+                return;
+
+        arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+        if (dev->archdata.dmabounce)
+                return 0;
+
+        return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+        .alloc = arm_dma_alloc,
+        .free = arm_dma_free,
+        .mmap = arm_dma_mmap,
+        .map_page = dmabounce_map_page,
+        .unmap_page = dmabounce_unmap_page,
+        .sync_single_for_cpu = dmabounce_sync_for_cpu,
+        .sync_single_for_device = dmabounce_sync_for_device,
+        .map_sg = arm_dma_map_sg,
+        .unmap_sg = arm_dma_unmap_sg,
+        .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+        .sync_sg_for_device = arm_dma_sync_sg_for_device,
+        .set_dma_mask = dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
                 const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif
 
         dev->archdata.dmabounce = device_info;
+        set_dma_ops(dev, &dmabounce_ops);
 
         dev_info(dev, "dmabounce: registered device\n");
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
         struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
         dev->archdata.dmabounce = NULL;
+        set_dma_ops(dev, NULL);
 
         if (!device_info) {
                 dev_warn(dev,
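With this change dmabounce no longer overrides the kernel-wide __dma_map_page()/__dma_unmap_page() entry points; it packages its handlers into a dma_map_ops table that dmabounce_register_dev() installs per device via set_dma_ops(), with everything it does not intercept falling through to the regular arm_dma_ops. As a hedged sketch, not part of this commit, a platform with a device that can only reach the first 64MB of memory would hook in roughly like this (the example_* names, the 64MB limit and the pool sizes are invented for illustration):

#include <linux/dma-mapping.h>  /* pulls in dmabounce_register_dev() on ARM */

/* non-zero return: this buffer is outside the device's reach, bounce it */
static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
        return (addr + size) > 0x04000000;      /* 64MB, illustrative */
}

static int example_probe(struct device *dev)
{
        /* 2KB pool for small buffers, 64KB pool for large ones */
        return dmabounce_register_dev(dev, 2048, 65536, example_needs_bounce);
}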
arch/arm/include/asm/device.h:

@@ -7,12 +7,16 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
+        struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMABOUNCE
         struct dmabounce_device_info *dmabounce;
 #endif
 #ifdef CONFIG_IOMMU_API
         void *iommu; /* private IOMMU data */
 #endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+        struct dma_iommu_mapping *mapping;
+#endif
 };
 
 struct omap_device;
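The new archdata.dma_ops pointer is what makes the per-device dispatch above work: set_dma_ops() fills it in and every dma_map_*() wrapper consults it before falling back to the architecture default. A minimal sketch of the conventional helper shape (the real ARM definitions live in one of the collapsed dma-mapping.h diffs and add dma-debug/kmemcheck hooks omitted here):

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev && dev->archdata.dma_ops)
                return dev->archdata.dma_ops;   /* per-device override */
        return &arm_dma_ops;                    /* architecture default */
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
        dev->archdata.dma_ops = ops;
}

/* every mapping call then dispatches through the table, e.g.: */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return get_dma_ops(dev)->map_page(dev, page, offset, size, dir, NULL);
}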
arch/arm/include/asm/dma-iommu.h (new file):

+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+        /* iommu specific data */
+        struct iommu_domain *domain;
+
+        void *bitmap;
+        size_t bits;
+        unsigned int order;
+        dma_addr_t base;
+
+        spinlock_t lock;
+        struct kref kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+                         int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+                            struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
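A platform would consume this new interface by creating a mapping for a bus and attaching devices to it; from then on the ordinary dma_map_*()/dma_alloc_*() calls on those devices allocate bus addresses out of the IOMMU-backed window. A hedged usage sketch, not from this commit (the base address, window size, order value and example_* name are illustrative assumptions; error handling assumes ERR_PTR-style returns):

static int example_setup_iommu_dma(struct device *dev)
{
        struct dma_iommu_mapping *mapping;
        int err;

        /* 128MB window at bus address 0x80000000, page-granular bitmap */
        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           0x80000000, 0x08000000, 0);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        err = arm_iommu_attach_device(dev, mapping);
        if (err) {
                arm_iommu_release_mapping(mapping);     /* drops the kref */
                return err;
        }

        /* dma_map_*() on this device now allocates from the IOMMU window */
        return 0;
}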
(Two further file diffs in this merge are collapsed in this view.)
arch/arm/mm/vmregion.h:

@@ -17,7 +17,7 @@ struct arm_vmregion {
         struct list_head vm_list;
         unsigned long vm_start;
         unsigned long vm_end;
-        struct page *vm_pages;
+        void *priv;
         int vm_active;
         const void *caller;
 };
drivers/base/dma-coherent.c:

@@ -10,6 +10,7 @@
 struct dma_coherent_mem {
         void *virt_base;
         dma_addr_t device_base;
+        phys_addr_t pfn_base;
         int size;
         int flags;
         unsigned long *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
         dev->dma_mem->virt_base = mem_base;
         dev->dma_mem->device_base = device_addr;
+        dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
         dev->dma_mem->size = pages;
         dev->dma_mem->flags = flags;
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
         return 0;
 }
 EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_coherent
+ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if
+ * dma_release_coherent() should proceed with mapping memory from
+ * generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+                           void *vaddr, size_t size, int *ret)
+{
+        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+        if (mem && vaddr >= mem->virt_base && vaddr + size <=
+                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+                unsigned long off = vma->vm_pgoff;
+                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+                int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+                int count = size >> PAGE_SHIFT;
+
+                *ret = -ENXIO;
+                if (off < count && user_count <= count - off) {
+                        unsigned pfn = mem->pfn_base + start + off;
+                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+                                               user_count << PAGE_SHIFT,
+                                               vma->vm_page_prot);
+                }
+                return 1;
+        }
+        return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
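As the kernel-doc above spells out, callers try the per-device pool first and proceed with their generic path only when the helper returns 0, mirroring dma_alloc_from_coherent()/dma_release_from_coherent(). A minimal sketch of that calling pattern, where my_dma_mmap() and my_generic_mmap() are placeholders for an architecture's mmap handler and its fallback, not real kernel functions:

int my_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret;

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;     /* buffer came from the per-device pool */

        return my_generic_mmap(dev, vma, cpu_addr, dma_addr, size);
}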
...@@ -3,13 +3,15 @@ ...@@ -3,13 +3,15 @@
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/* /*
* These two functions are only for dma allocator. * These three functions are only for dma allocator.
* Don't use them in device drivers. * Don't use them in device drivers.
*/ */
int dma_alloc_from_coherent(struct device *dev, ssize_t size, int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret); dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr); int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
/* /*
* Standard interface * Standard interface
*/ */
......