Commit dc455d02 authored by Russell King

[ARM] Fix ups for ARM generic dma mapping interface

This brings the ARM dma mapping functionality into line with
the current generic interface, allowing any struct device to
be passed into the dma_* functions.

Further cleanups will be possible when the USB layer is
converted to use the dma_* API.
parent 14bf9e0a
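
In practice this means a driver no longer needs the fake SA-1111 struct pci_dev to do streaming DMA. The fragment below is an illustrative sketch only (not part of this commit; the function, device, and buffer names are hypothetical) of a caller using the generic interface, where dev may be any struct device:

/* Illustrative sketch, not part of this commit: "dev" may be any
 * struct device (for example a SA-1111 device), not only a PCI one. */
static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* performs cache maintenance, or bounce-buffering on SA-1111 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... hand "handle" to the device and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}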
@@ -201,7 +201,6 @@ EXPORT_SYMBOL(__arch_clear_user);
 EXPORT_SYMBOL(__arch_strnlen_user);

 /* consistent area handling */
-EXPORT_SYMBOL(pci_alloc_consistent);
 EXPORT_SYMBOL(consistent_alloc);
 EXPORT_SYMBOL(consistent_free);
 EXPORT_SYMBOL(consistent_sync);
...
@@ -21,8 +21,7 @@ obj-$(CONFIG_SA1100_PT_SYSTEM3) += cpu-sa1110.o
 endif

 # Next, the SA1111 stuff.
-obj-$(CONFIG_SA1111) += sa1111.o
-obj-$(CONFIG_USB_OHCI_HCD) += sa1111-pcibuf.o pcipool.o
+obj-$(CONFIG_SA1111) += sa1111.o sa1111-pcibuf.o pcipool.o

 # Specific board support
 obj-$(CONFIG_SA1100_ADSBITSY) += adsbitsy.o
...
@@ -56,7 +56,7 @@ static inline const char *slot_name(const struct pci_pool *pool)
 	if (pdev == 0)
		return "[0]";
-	else if (dev_is_sa1111(pdev))
+	else if (pcidev_is_sa1111(pdev))
		return "[SA-1111]";
 	else
		return pdev->slot_name;
 }
...
@@ -124,7 +124,7 @@ destroy_safe_buffer_pools(void)

 /* allocate a 'safe' buffer and keep track of it */
 static struct safe_buffer *
-alloc_safe_buffer(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+alloc_safe_buffer(void *ptr, size_t size, int direction)
 {
	struct safe_buffer *buf;
	struct pci_pool *pool;
@@ -254,7 +254,7 @@ static void print_map_stats(void)
 #endif

 static dma_addr_t
-map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+map_single(void *ptr, size_t size, int direction)
 {
	dma_addr_t dma_addr;
@@ -267,7 +267,7 @@ map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
		DO_STATS ( bounce_count++ ) ;

-		buf = alloc_safe_buffer(hwdev, ptr, size, direction);
+		buf = alloc_safe_buffer(ptr, size, direction);
		if (buf == 0) {
			printk(KERN_ERR
			       "%s: unable to map unsafe buffer %p!\n",
@@ -302,8 +302,7 @@ map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
 }

 static void
-unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-	     size_t size, int direction)
+unmap_single(dma_addr_t dma_addr, size_t size, int direction)
 {
	struct safe_buffer *buf;
@@ -332,8 +331,7 @@ unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 }

 static void
-sync_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-	    size_t size, int direction)
+sync_single(dma_addr_t dma_addr, size_t size, int direction)
 {
	struct safe_buffer *buf;
@@ -381,20 +379,19 @@ sync_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
  * (basically move the buffer from an unsafe area to a safe one)
  */
 dma_addr_t
-sa1111_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+sa1111_map_single(void *ptr, size_t size, int direction)
 {
	unsigned long flags;
	dma_addr_t dma_addr;

-	DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
-		__func__, hwdev, ptr, size, direction);
+	DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, ptr, size, direction);

-	BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
	BUG_ON(direction == PCI_DMA_NONE);

	local_irq_save(flags);
-	dma_addr = map_single(hwdev, ptr, size, direction);
+	dma_addr = map_single(ptr, size, direction);
	local_irq_restore(flags);
@@ -409,35 +406,31 @@ sa1111_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
  */
 void
-sa1111_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-		    size_t size, int direction)
+sa1111_unmap_single(dma_addr_t dma_addr, size_t size, int direction)
 {
	unsigned long flags;

-	DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
-		__func__, hwdev, (void *) dma_addr, size, direction);
+	DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, direction);

-	BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
	BUG_ON(direction == PCI_DMA_NONE);

	local_irq_save(flags);
-	unmap_single(hwdev, dma_addr, size, direction);
+	unmap_single(dma_addr, size, direction);
	local_irq_restore(flags);
 }

 int
-sa1111_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-	      int nents, int direction)
+sa1111_map_sg(struct scatterlist *sg, int nents, int direction)
 {
	unsigned long flags;
	int i;

-	DPRINTK("%s(hwdev=%p,sg=%p,nents=%d,dir=%x)\n",
-		__func__, hwdev, sg, nents, direction);
+	DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
+		__func__, sg, nents, direction);

-	BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
	BUG_ON(direction == PCI_DMA_NONE);

	local_irq_save(flags);
@@ -449,7 +442,7 @@ sa1111_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
		void *ptr = page_address(page) + offset;

		sg->dma_address =
-			map_single(hwdev, ptr, length, direction);
+			map_single(ptr, length, direction);
	}

	local_irq_restore(flags);
@@ -458,16 +451,14 @@ sa1111_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
 }

 void
-sa1111_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
-		int direction)
+sa1111_unmap_sg(struct scatterlist *sg, int nents, int direction)
 {
	unsigned long flags;
	int i;

-	DPRINTK("%s(hwdev=%p,sg=%p,nents=%d,dir=%x)\n",
-		__func__, hwdev, sg, nents, direction);
+	DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
+		__func__, sg, nents, direction);

-	BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
	BUG_ON(direction == PCI_DMA_NONE);

	local_irq_save(flags);
@@ -476,41 +467,36 @@ sa1111_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

-		unmap_single(hwdev, dma_addr, length, direction);
+		unmap_single(dma_addr, length, direction);
	}

	local_irq_restore(flags);
 }

 void
-sa1111_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
-		       size_t size, int direction)
+sa1111_dma_sync_single(dma_addr_t dma_addr, size_t size, int direction)
 {
	unsigned long flags;

-	DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
-		__func__, hwdev, (void *) dma_addr, size, direction);
+	DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
+		__func__, (void *) dma_addr, size, direction);

-	BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
-
	local_irq_save(flags);
-	sync_single(hwdev, dma_addr, size, direction);
+	sync_single(dma_addr, size, direction);
	local_irq_restore(flags);
 }

 void
-sa1111_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg,
-		   int nents, int direction)
+sa1111_dma_sync_sg(struct scatterlist *sg, int nents, int direction)
 {
	unsigned long flags;
	int i;

-	DPRINTK("%s(hwdev=%p,sg=%p,nents=%d,dir=%x)\n",
-		__func__, hwdev, sg, nents, direction);
+	DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
+		__func__, sg, nents, direction);

-	BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
	BUG_ON(direction == PCI_DMA_NONE);

	local_irq_save(flags);
@@ -519,7 +505,7 @@ sa1111_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg,
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

-		sync_single(hwdev, dma_addr, length, direction);
+		sync_single(dma_addr, length, direction);
	}

	local_irq_restore(flags);
...
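
The hwdev argument dropped throughout this file was only ever the fake SA-1111 PCI device, so the bounce-buffer logic itself is unchanged. Condensed to its essentials, the scheme these helpers implement looks roughly like this (a simplified sketch, not the literal sa1111-pcibuf.c code; dma_range_is_safe() is a made-up stand-in for the real address check):

/* Simplified sketch of the SA-1111 bounce-buffer idea, not the literal
 * kernel code.  dma_range_is_safe() is hypothetical. */
static dma_addr_t sketch_map_single(void *ptr, size_t size, int direction)
{
	if (!dma_range_is_safe(virt_to_bus(ptr), size)) {
		/* allocate a DMA-safe copy from a pci_pool and track it */
		struct safe_buffer *buf = alloc_safe_buffer(ptr, size, direction);

		if (direction == PCI_DMA_TODEVICE)
			memcpy(buf->safe, ptr, size);	/* bounce outbound data */

		return buf->safe_dma_addr;
	}

	/* already safe: just perform cache maintenance */
	consistent_sync(ptr, size, direction);
	return virt_to_bus(ptr);
}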
@@ -94,17 +94,6 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
	return NULL;
 }

-void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
-{
-	int gfp = GFP_KERNEL;
-
-	if (hwdev == NULL || dev_is_sa1111(hwdev) ||
-	    hwdev->dma_mask != 0xffffffff)
-		gfp |= GFP_DMA;
-
-	return consistent_alloc(gfp, size, handle);
-}
-
 /*
  * free a page as defined by the above mapping.  We expressly forbid
  * calling this from interrupt context.
...
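
The pci_alloc_consistent() removed here survives as the generic dma_alloc_coherent() inline added later in this diff, which applies the same GFP_DMA heuristic keyed off the struct device instead of the struct pci_dev. A hypothetical caller migrates like so (illustrative only, not part of this commit):

	/* Illustrative migration, not part of this commit. */
	void *cpu;
	dma_addr_t handle;

	/* before: cpu = pci_alloc_consistent(pdev, size, &handle); */
	cpu = dma_alloc_coherent(dev, size, &handle);
	/* ... use the buffer ... */
	dma_free_coherent(dev, size, cpu, handle);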
-#include <asm-generic/dma-mapping.h>
+#ifndef ASMARM_DMA_MAPPING_H
+#define ASMARM_DMA_MAPPING_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/mm.h>		/* need struct page */
+
+#include <asm/scatterlist.h>
+
+/*
+ * DMA-consistent mapping functions.  These allocate/free a region of
+ * uncached, unwrite-buffered mapped memory space for use with DMA
+ * devices.  This is the "generic" version.  The PCI specific version
+ * is in pci.h
+ */
+extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
+extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
+extern void consistent_sync(void *kaddr, size_t size, int rw);
+
+/*
+ * For SA-1111 these functions are "magic" and utilize bounce
+ * buffers as needed to work around SA-1111 DMA bugs.
+ */
+dma_addr_t sa1111_map_single(void *, size_t, int);
+void sa1111_unmap_single(dma_addr_t, size_t, int);
+int sa1111_map_sg(struct scatterlist *, int, int);
+void sa1111_unmap_sg(struct scatterlist *, int, int);
+void sa1111_dma_sync_single(dma_addr_t, size_t, int);
+void sa1111_dma_sync_sg(struct scatterlist *, int, int);
+
+#ifdef CONFIG_SA1111
+extern struct bus_type sa1111_bus_type;
+#define dmadev_is_sa1111(dev)	((dev)->bus == &sa1111_bus_type)
+#else
+#define dmadev_is_sa1111(dev)	(0)
+#endif
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	return 1;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	return 32;
+}
+
+static inline int dma_is_consistent(dma_addr_t handle)
+{
+	return 0;
+}
+/**
+ * dma_alloc_coherent - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ *
+ * Allocate some uncached, unbuffered memory for a device for
+ * performing DMA.  This function allocates pages, and will
+ * return the CPU-viewed address, and sets @handle to be the
+ * device-viewed address.
+ */
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle)
+{
+	int gfp = GFP_KERNEL;
+
+	if (dev == NULL || dmadev_is_sa1111(dev) || *dev->dma_mask != 0xffffffff)
+		gfp |= GFP_DMA;
+
+	return consistent_alloc(gfp, size, handle);
+}
+
+/**
+ * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_coherent
+ * @cpu_addr: CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+ * dma_alloc_coherent().
+ *
+ * References to memory and mappings associated with cpu_addr/handle
+ * during and after this call executing are illegal.
+ */
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle)
+{
+	consistent_free(cpu_addr, size, handle);
+}
+
+/**
+ * dma_map_single - map a single buffer for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @cpu_addr: CPU direct mapped address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_single() or dma_sync_single().
+ */
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+	       enum dma_data_direction dir)
+{
+	if (dmadev_is_sa1111(dev))
+		return sa1111_map_single(cpu_addr, size, dir);
+
+	consistent_sync(cpu_addr, size, dir);
+	return __virt_to_bus((unsigned long)cpu_addr);
+}
+
+/**
+ * dma_unmap_single - unmap a single buffer previously mapped
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a single streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_single() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
+		 enum dma_data_direction dir)
+{
+	if (dmadev_is_sa1111(dev))
+		sa1111_unmap_single(handle, size, dir);
+
+	/* nothing to do */
+}
+
+#if 0
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long off,
+	     size_t size, enum dma_data_direction dir)
+{
+	/* fixme */
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
+	       enum dma_data_direction dir)
+{
+	/* fixme */
+}
+#endif
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming
+ * mode for DMA.  This is the scatter-gather version of the
+ * dma_map_single() interface above.  Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length.  They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for dma_map_single()
+ * are the same here.
+ */
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction dir)
+{
+	int i;
+
+	if (dmadev_is_sa1111(dev))
+		return sa1111_map_sg(sg, nents, dir);
+
+	for (i = 0; i < nents; i++, sg++) {
+		char *virt;
+
+		sg->dma_address = page_to_bus(sg->page) + sg->offset;
+		virt = page_address(sg->page) + sg->offset;
+		consistent_sync(virt, sg->length, dir);
+	}
+
+	return nents;
+}
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Unmap a set of streaming mode DMA translations.
+ * Again, CPU read rules concerning calls here are the same as for
+ * dma_unmap_single() above.
+ */
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+	     enum dma_data_direction dir)
+{
+	if (dmadev_is_sa1111(dev)) {
+		sa1111_unmap_sg(sg, nents, dir);
+		return;
+	}
+
+	/* nothing to do */
+}
+
+/**
+ * dma_sync_single
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a single streaming mode DMA
+ * translation after a transfer.
+ *
+ * If you perform a dma_map_single() but wish to interrogate the
+ * buffer using the CPU, yet do not wish to tear down the DMA
+ * mapping, you must call this function before doing so.  At the
+ * next point you give the DMA address back to the device, the
+ * device again owns the buffer.
+ */
+static inline void
+dma_sync_single(struct device *dev, dma_addr_t handle, size_t size,
+		enum dma_data_direction dir)
+{
+	if (dmadev_is_sa1111(dev)) {
+		sa1111_dma_sync_single(handle, size, dir);
+		return;
+	}
+
+	consistent_sync((void *)__bus_to_virt(handle), size, dir);
+}
+/**
+ * dma_sync_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as dma_sync_single() but for a scatter-gather list,
+ * same rules and usage.
+ */
+static inline void
+dma_sync_sg(struct device *dev, struct scatterlist *sg, int nents,
+	    enum dma_data_direction dir)
+{
+	int i;
+
+	if (dmadev_is_sa1111(dev)) {
+		sa1111_dma_sync_sg(sg, nents, dir);
+		return;
+	}
+
+	for (i = 0; i < nents; i++, sg++) {
+		char *virt = page_address(sg->page) + sg->offset;
+		consistent_sync(virt, sg->length, dir);
+	}
+}
+
+#endif /* __KERNEL__ */
+#endif
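
To see how these inlines are meant to be called, here is a hypothetical scatter-gather transfer (illustrative only, not part of this commit; device_queue_dma() is a made-up stand-in for whatever device-specific programming a real driver would do):

/* Hypothetical caller of dma_map_sg()/dma_unmap_sg() above; for a
 * SA-1111 device the calls divert to the sa1111_* bounce helpers,
 * otherwise they reduce to cache maintenance plus bus-address setup.
 * device_queue_dma() is a made-up device-specific hook. */
static void example_transfer_sg(struct device *dev,
				struct scatterlist *sglist, int nents)
{
	int i, count;

	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);

	for (i = 0; i < count; i++)
		device_queue_dma(sg_dma_address(&sglist[i]),
				 sg_dma_len(&sglist[i]));

	/* ... wait for the transfer to complete ... */

	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
}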
@@ -267,16 +267,6 @@ extern void __iounmap(void *addr);
 #define iounmap(cookie)	__arch_iounmap(cookie)
 #endif

-/*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
- */
-extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
-extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
-extern void consistent_sync(void *vaddr, size_t size, int rw);
-
 /*
  * can the hardware map this into one segment or not, given no other
  * constraints.
...