Commit dc455d02 authored by Russell King

[ARM] Fix ups for ARM generic dma mapping interface

This brings the ARM dma mapping functionality into line with
the current generic interface, allowing any struct device to
be passed into the dma_* functions.

Further cleanups will be possible when the USB layer is
converted to use the dma_* API.
parent 14bf9e0a
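For illustration, a minimal sketch (a hypothetical driver fragment, not part of this commit) of what the generic interface permits after this change: the same call works whether dev is a platform device, an SA-1111 device, or NULL for ISA/EISA-like buses, since the ARM implementation dispatches on the device's bus type.

#include <linux/dma-mapping.h>

/* Map a driver buffer for a CPU-to-device transfer. The device owns
 * the memory until dma_unmap_single() or dma_sync_single() is called.
 */
static dma_addr_t example_map_tx(struct device *dev, void *buf, size_t len)
{
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}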
@@ -201,7 +201,6 @@ EXPORT_SYMBOL(__arch_clear_user);
EXPORT_SYMBOL(__arch_strnlen_user);
/* consistent area handling */
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(consistent_alloc);
EXPORT_SYMBOL(consistent_free);
EXPORT_SYMBOL(consistent_sync);
......
@@ -21,8 +21,7 @@ obj-$(CONFIG_SA1100_PT_SYSTEM3) += cpu-sa1110.o
endif
# Next, the SA1111 stuff.
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_USB_OHCI_HCD) += sa1111-pcibuf.o pcipool.o
obj-$(CONFIG_SA1111) += sa1111.o sa1111-pcibuf.o pcipool.o
# Specific board support
obj-$(CONFIG_SA1100_ADSBITSY) += adsbitsy.o
......
@@ -56,7 +56,7 @@ static inline const char *slot_name(const struct pci_pool *pool)
if (pdev == 0)
return "[0]";
else if (dev_is_sa1111(pdev))
else if (pcidev_is_sa1111(pdev))
return "[SA-1111]";
else
return pdev->slot_name;
......
@@ -124,7 +124,7 @@ destroy_safe_buffer_pools(void)
/* allocate a 'safe' buffer and keep track of it */
static struct safe_buffer *
alloc_safe_buffer(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
alloc_safe_buffer(void *ptr, size_t size, int direction)
{
struct safe_buffer *buf;
struct pci_pool *pool;
@@ -254,7 +254,7 @@ static void print_map_stats(void)
#endif
static dma_addr_t
map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
map_single(void *ptr, size_t size, int direction)
{
dma_addr_t dma_addr;
@@ -267,7 +267,7 @@ map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
DO_STATS ( bounce_count++ ) ;
buf = alloc_safe_buffer(hwdev, ptr, size, direction);
buf = alloc_safe_buffer(ptr, size, direction);
if (buf == 0) {
printk(KERN_ERR
"%s: unable to map unsafe buffer %p!\n",
@@ -302,8 +302,7 @@ map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
}
static void
unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
unmap_single(dma_addr_t dma_addr, size_t size, int direction)
{
struct safe_buffer *buf;
@@ -332,8 +331,7 @@ unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
}
static void
sync_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
sync_single(dma_addr_t dma_addr, size_t size, int direction)
{
struct safe_buffer *buf;
@@ -381,20 +379,19 @@ sync_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
* (basically move the buffer from an unsafe area to a safe one)
*/
dma_addr_t
sa1111_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
sa1111_map_single(void *ptr, size_t size, int direction)
{
unsigned long flags;
dma_addr_t dma_addr;
DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
__func__, hwdev, ptr, size, direction);
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, ptr, size, direction);
BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
BUG_ON(direction == PCI_DMA_NONE);
local_irq_save(flags);
dma_addr = map_single(hwdev, ptr, size, direction);
dma_addr = map_single(ptr, size, direction);
local_irq_restore(flags);
@@ -409,35 +406,31 @@ sa1111_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
*/
void
sa1111_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
sa1111_unmap_single(dma_addr_t dma_addr, size_t size, int direction)
{
unsigned long flags;
DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
__func__, hwdev, (void *) dma_addr, size, direction);
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, direction);
BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
BUG_ON(direction == PCI_DMA_NONE);
local_irq_save(flags);
unmap_single(hwdev, dma_addr, size, direction);
unmap_single(dma_addr, size, direction);
local_irq_restore(flags);
}
int
sa1111_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
sa1111_map_sg(struct scatterlist *sg, int nents, int direction)
{
unsigned long flags;
int i;
DPRINTK("%s(hwdev=%p,sg=%p,nents=%d,dir=%x)\n",
__func__, hwdev, sg, nents, direction);
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
BUG_ON(direction == PCI_DMA_NONE);
local_irq_save(flags);
@@ -449,7 +442,7 @@ sa1111_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
void *ptr = page_address(page) + offset;
sg->dma_address =
map_single(hwdev, ptr, length, direction);
map_single(ptr, length, direction);
}
local_irq_restore(flags);
@@ -458,16 +451,14 @@ sa1111_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
}
void
sa1111_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
int direction)
sa1111_unmap_sg(struct scatterlist *sg, int nents, int direction)
{
unsigned long flags;
int i;
DPRINTK("%s(hwdev=%p,sg=%p,nents=%d,dir=%x)\n",
__func__, hwdev, sg, nents, direction);
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
BUG_ON(direction == PCI_DMA_NONE);
local_irq_save(flags);
@@ -476,41 +467,36 @@ sa1111_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
unmap_single(hwdev, dma_addr, length, direction);
unmap_single(dma_addr, length, direction);
}
local_irq_restore(flags);
}
void
sa1111_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
sa1111_dma_sync_single(dma_addr_t dma_addr, size_t size, int direction)
{
unsigned long flags;
DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
__func__, hwdev, (void *) dma_addr, size, direction);
BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, direction);
local_irq_save(flags);
sync_single(hwdev, dma_addr, size, direction);
sync_single(dma_addr, size, direction);
local_irq_restore(flags);
}
void
sa1111_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
sa1111_dma_sync_sg(struct scatterlist *sg, int nents, int direction)
{
unsigned long flags;
int i;
DPRINTK("%s(hwdev=%p,sg=%p,nents=%d,dir=%x)\n",
__func__, hwdev, sg, nents, direction);
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
BUG_ON(direction == PCI_DMA_NONE);
local_irq_save(flags);
@@ -519,7 +505,7 @@ sa1111_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg,
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
sync_single(hwdev, dma_addr, length, direction);
sync_single(dma_addr, length, direction);
}
local_irq_restore(flags);
......
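The sa1111_* entry points above all funnel into map_single()/unmap_single(), which implement bounce buffering: data bound for a region the SA-1111 cannot DMA to or from safely is staged through a "safe" buffer drawn from a pci_pool. A rough sketch of the idea, with hypothetical helper code (the real implementation tracks struct safe_buffer records and looks them up by dma_addr at unmap/sync time):

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/io.h>

/* Hypothetical illustration only; alloc_safe_buffer() above uses
 * pci_pools rather than kmalloc, and records the mapping so that
 * unmap_single() can copy inbound data back and free the buffer.
 */
static dma_addr_t bounce_map(void *unsafe, size_t size, int dir)
{
	void *safe = kmalloc(size, GFP_ATOMIC | GFP_DMA);

	if (safe == NULL)
		return 0;	/* failure sentinel for this sketch */
	if (dir != PCI_DMA_FROMDEVICE)
		memcpy(safe, unsafe, size);	/* stage outbound data */
	/* ...remember (unsafe, safe, size, dir) so that unmap can copy
	 * inbound data back to 'unsafe' and free 'safe'... */
	return virt_to_bus(safe);
}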
@@ -94,17 +94,6 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
return NULL;
}
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
{
int gfp = GFP_KERNEL;
if (hwdev == NULL || dev_is_sa1111(hwdev) ||
hwdev->dma_mask != 0xffffffff)
gfp |= GFP_DMA;
return consistent_alloc(gfp, size, handle);
}
/*
* free a page as defined by the above mapping. We expressly forbid
* calling this from interrupt context.
......
#include <asm-generic/dma-mapping.h>
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/mm.h> /* need struct page */
#include <asm/scatterlist.h>
/*
* DMA-consistent mapping functions. These allocate/free a region of
* uncached, unwrite-buffered mapped memory space for use with DMA
* devices. This is the "generic" version. The PCI specific version
* is in pci.h
*/
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
extern void consistent_sync(void *kaddr, size_t size, int rw);
/*
* For SA-1111 these functions are "magic" and utilize bounce
* buffers as needed to work around SA-1111 DMA bugs.
*/
dma_addr_t sa1111_map_single(void *, size_t, int);
void sa1111_unmap_single(dma_addr_t, size_t, int);
int sa1111_map_sg(struct scatterlist *, int, int);
void sa1111_unmap_sg(struct scatterlist *, int, int);
void sa1111_dma_sync_single(dma_addr_t, size_t, int);
void sa1111_dma_sync_sg(struct scatterlist *, int, int);
#ifdef CONFIG_SA1111
extern struct bus_type sa1111_bus_type;
#define dmadev_is_sa1111(dev) ((dev)->bus == &sa1111_bus_type)
#else
#define dmadev_is_sa1111(dev) (0)
#endif
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during PCI bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
*/
static inline int dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
return 0;
}
static inline int dma_get_cache_alignment(void)
{
return 32;
}
static inline int dma_is_consistent(dma_addr_t handle)
{
return 0;
}
/**
* dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
*
* Allocate some uncached, unbuffered memory for a device for
* performing DMA. This function allocates pages, and will
* return the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle)
{
int gfp = GFP_KERNEL;
if (dev == NULL || dmadev_is_sa1111(dev) || *dev->dma_mask != 0xffffffff)
gfp |= GFP_DMA;
return consistent_alloc(gfp, size, handle);
}
/**
* dma_free_coherent - free memory allocated by dma_alloc_coherent
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
*
* Free (and unmap) a DMA buffer previously allocated by
* dma_alloc_coherent().
*
* References to memory and mappings associated with cpu_addr/handle
* during and after the execution of this call are illegal.
*/
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle)
{
consistent_free(cpu_addr, size, handle);
}
/**
* dma_map_single - map a single buffer for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @cpu_addr: CPU direct mapped address of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Ensure that any data held in the cache is appropriately discarded
* or written back.
*
* The device owns this memory once this call has completed. The CPU
* can regain ownership by calling dma_unmap_single() or dma_sync_single().
*/
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev))
return sa1111_map_single(cpu_addr, size, dir);
consistent_sync(cpu_addr, size, dir);
return __virt_to_bus((unsigned long)cpu_addr);
}
/**
* dma_unmap_single - unmap a single buffer previously mapped
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Unmap a single streaming mode DMA translation. The handle and size
* must match what was provided in the previous dma_map_single() call.
* All other usages are undefined.
*
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev))
sa1111_unmap_single(handle, size, dir);
/* nothing to do */
}
#if 0
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
/* fixme */
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
/* fixme */
}
#endif
/**
* dma_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
* @dir: DMA transfer direction
*
* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above dma_map_single() interface. Here the scatter-gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for dma_map_single are
* the same here.
*/
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
if (dmadev_is_sa1111(dev))
return sa1111_map_sg(sg, nents, dir);
for (i = 0; i < nents; i++, sg++) {
char *virt;
sg->dma_address = page_to_bus(sg->page) + sg->offset;
virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir);
}
return nents;
}
/**
* dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
* @dir: DMA transfer direction
*
* Unmap a set of streaming mode DMA translations.
* Again, CPU read rules concerning calls here are the same as for
* dma_unmap_single() above.
*/
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_unmap_sg(sg, nents, dir);
return;
}
/* nothing to do */
}
/**
* dma_sync_single
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
* @size: size of buffer to map
* @dir: DMA transfer direction
*
* Make physical memory consistent for a single streaming mode DMA
* translation after a transfer.
*
* If you perform a dma_map_single() but wish to interrogate the
* buffer using the CPU, yet do not wish to tear down the DMA
* mapping, you must call this function before doing so. At the
* next point you give the DMA address back to the device, the
* device again owns the buffer.
*/
static inline void
dma_sync_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_single(handle, size, dir);
return;
}
consistent_sync((void *)__bus_to_virt(handle), size, dir);
}
/**
* dma_sync_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
* @dir: DMA transfer direction
*
* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
*
* The same as dma_sync_single() but for a scatter-gather list,
* same rules and usage.
*/
static inline void
dma_sync_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_sg(sg, nents, dir);
return;
}
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir);
}
}
#endif /* __KERNEL__ */
#endif
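For reference, a sketch of typical use of the header above (hypothetical driver code, not part of this commit). Note that this early dma_alloc_coherent() takes no gfp argument; the GFP_DMA decision for NULL, SA-1111, and limited-mask devices is made internally.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int example_setup(struct device *dev)
{
	dma_addr_t ring_dma, buf_dma;
	void *ring, *buf;

	/* Coherent (uncached, unbuffered) memory; ring_dma is the
	 * device-viewed address. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma);
	if (ring == NULL)
		return -ENOMEM;

	buf = kmalloc(512, GFP_KERNEL);
	if (buf == NULL) {
		dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
		return -ENOMEM;
	}

	/* Streaming mapping: the device owns buf until it is unmapped. */
	buf_dma = dma_map_single(dev, buf, 512, DMA_TO_DEVICE);

	/* ... program the device with buf_dma, wait for completion ... */

	dma_unmap_single(dev, buf_dma, 512, DMA_TO_DEVICE);
	kfree(buf);
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}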
@@ -267,16 +267,6 @@ extern void __iounmap(void *addr);
#define iounmap(cookie) __arch_iounmap(cookie)
#endif
/*
* DMA-consistent mapping functions. These allocate/free a region of
* uncached, unwrite-buffered mapped memory space for use with DMA
* devices. This is the "generic" version. The PCI specific version
* is in pci.h
*/
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size, dma_addr_t handle);
extern void consistent_sync(void *vaddr, size_t size, int rw);
/*
* can the hardware map this into one segment or not, given no other
* constraints.
......
@@ -3,36 +3,20 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/mm.h> /* bah! */
#include <linux/dma-mapping.h>
#include <asm/arch/hardware.h>
#include <asm/scatterlist.h>
#include <asm/page.h>
#include <asm/io.h>
struct pci_dev;
#include <asm/hardware.h> /* for PCIBIOS_MIN_* */
#ifdef CONFIG_SA1111
/*
* For SA-1111 these functions are "magic" and utilize bounce
* buffers as needed to work around SA-1111 DMA bugs. They are called in
* place of their pci_* counterparts when dev_is_sa1111() returns true.
* Keep the SA1111 DMA-mapping tricks until the USB layer gets
* properly converted to the new DMA-mapping API, at which time
* most of this file can die.
*/
dma_addr_t sa1111_map_single(struct pci_dev *, void *, size_t, int);
void sa1111_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
int sa1111_map_sg(struct pci_dev *, struct scatterlist *, int, int);
void sa1111_unmap_sg(struct pci_dev *, struct scatterlist *, int, int);
void sa1111_dma_sync_single(struct pci_dev *, dma_addr_t, size_t, int);
void sa1111_dma_sync_sg(struct pci_dev *, struct scatterlist *, int, int);
#ifdef CONFIG_SA1111
#define SA1111_FAKE_PCIDEV ((struct pci_dev *) 1111)
#define dev_is_sa1111(dev) (dev == SA1111_FAKE_PCIDEV)
#define pcidev_is_sa1111(dev) (dev == SA1111_FAKE_PCIDEV)
#else
#define dev_is_sa1111(dev) (0)
#define pcidev_is_sa1111(dev) (0)
#endif
@@ -46,200 +30,116 @@ static inline void pcibios_penalize_isa_irq(int irq)
/* We don't do dynamic PCI IRQ allocation */
}
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
/*
* The PCI address space does equal the physical memory address space.
* The networking and block device layers use this boolean for bounce
* buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (0)
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices,
* NULL for PCI-like buses (ISA, EISA).
* Returns non-NULL cpu-view pointer to the buffer if successful and
* sets *dma_addrp to the pci side dma address as well, else *dma_addrp
* is undefined.
*/
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle);
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
{
int gfp = GFP_KERNEL;
if (hwdev == NULL || pcidev_is_sa1111(hwdev) ||
hwdev->dma_mask != 0xffffffff)
gfp |= GFP_DMA;
return consistent_alloc(gfp, size, handle);
}
/* Free and unmap a consistent DMA buffer.
* cpu_addr is what was returned from pci_alloc_consistent,
* size must be the same as what as passed into pci_alloc_consistent,
* and likewise dma_addr must be the same as what *dma_addrp was set to.
*
* References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
dma_addr_t handle)
{
consistent_free(vaddr, size, dma_handle);
dma_free_coherent(hwdev ? &hwdev->dev : NULL, size, vaddr, handle);
}
/* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
*
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single is performed.
*/
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int dir)
{
if (dev_is_sa1111(hwdev))
return sa1111_map_single(hwdev, ptr, size, direction);
if (pcidev_is_sa1111(hwdev))
return sa1111_map_single(ptr, size, dir);
consistent_sync(ptr, size, direction);
return virt_to_bus(ptr);
return dma_map_single(hwdev ? &hwdev->dev : NULL, ptr, size, dir);
}
/* Unmap a single streaming mode DMA translation. The dma_addr and size
* must match what was provided for in a previous pci_map_single call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t handle, size_t size, int dir)
{
if (dev_is_sa1111(hwdev))
sa1111_unmap_single(hwdev, dma_addr, size, direction);
if (pcidev_is_sa1111(hwdev)) {
sa1111_unmap_single(handle, size, dir);
return;
}
/* nothing to do */
return dma_unmap_single(hwdev ? &hwdev->dev : NULL, handle, size, dir);
}
/*
* Whether pci_unmap_{single,page} is a nop depends upon the
* configuration.
*/
#if defined(CONFIG_PCI) || defined(CONFIG_SA1111)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif /* CONFIG_PCI */
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter-gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dir)
{
int i;
if (dev_is_sa1111(hwdev))
return sa1111_map_sg(hwdev, sg, nents, direction);
for (i = 0; i < nents; i++, sg++) {
char *virt;
sg->dma_address = page_to_bus(sg->page) + sg->offset;
virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, direction);
}
if (pcidev_is_sa1111(hwdev))
return sa1111_map_sg(sg, nents, dir);
return nents;
return dma_map_sg(hwdev ? &hwdev->dev : NULL, sg, nents, dir);
}
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dir)
{
if (dev_is_sa1111(hwdev)) {
sa1111_unmap_sg(hwdev, sg, nents, direction);
if (pcidev_is_sa1111(hwdev)) {
sa1111_unmap_sg(sg, nents, dir);
return;
}
/* nothing to do */
return dma_unmap_sg(hwdev ? &hwdev->dev : NULL, sg, nents, dir);
}
/* Make physical memory consistent for a single
* streaming mode DMA translation after a transfer.
*
* If you perform a pci_map_single() but wish to interrogate the
* buffer using the cpu, yet do not wish to teardown the PCI dma
* mapping, you must call this function before doing so. At the
* next point you give the PCI dma address back to the card, the
* device again owns the buffer.
*/
static inline void
pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t handle, size_t size, int dir)
{
if (dev_is_sa1111(hwdev)) {
sa1111_dma_sync_single(hwdev, dma_handle, size, direction);
if (pcidev_is_sa1111(hwdev)) {
sa1111_dma_sync_single(handle, size, dir);
return;
}
consistent_sync(bus_to_virt(dma_handle), size, direction);
return dma_sync_single(hwdev ? &hwdev->dev : NULL, handle, size, dir);
}
/* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
*
* The same as pci_dma_sync_single but for a scatter-gather list,
* same rules and usage.
*/
static inline void
pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int dir)
{
int i;
if (dev_is_sa1111(hwdev)) {
sa1111_dma_sync_sg(hwdev, sg, nelems, direction);
if (pcidev_is_sa1111(hwdev)) {
sa1111_dma_sync_sg(sg, nelems, dir);
return;
}
for (i = 0; i < nelems; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, direction);
}
return dma_sync_sg(hwdev ? &hwdev->dev : NULL, sg, nelems, dir);
}
/* Return whether the given PCI device DMA address mask can
* be supported properly. For example, if your device can
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
*/
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return 1;
}
/* This isn't fine. */
/*
* We don't support DAC DMA cycles.
*/
#define pci_dac_dma_supported(pci_dev, mask) (0)
/* Return the index of the PCI controller for device PDEV. */
/*
* Return the index of the PCI controller for device PDEV.
*/
#define pci_controller_num(PDEV) (0)
#if defined(CONFIG_SA1111) && !defined(CONFIG_PCI)
/* SA-1111 needs these prototypes even when !defined(CONFIG_PCI) */
/* kmem_cache style wrapper around pci_alloc_consistent() */
/*
* SA-1111 needs these prototypes even when !defined(CONFIG_PCI)
*
* kmem_cache style wrapper around pci_alloc_consistent()
*/
struct pci_pool *pci_pool_create (const char *name, struct pci_dev *dev,
size_t size, size_t align, size_t allocation);
void pci_pool_destroy (struct pci_pool *pool);
@@ -248,6 +148,26 @@ void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle);
void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr);
#endif
/*
* Whether pci_unmap_{single,page} is a nop depends upon the
* configuration.
*/
#if defined(CONFIG_PCI) || defined(CONFIG_SA1111)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
......
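With the above, the pci_* calls become thin shims over the dma_* API, with the SA-1111 fake PCI device intercepted before the generic path. Illustratively (a hypothetical fragment, assuming a real PCI device), the two forms below perform the same work, since the PCI_DMA_* direction codes share the dma_data_direction encoding:

#include <linux/pci.h>

static void example_equivalence(struct pci_dev *pdev, void *ptr, size_t len)
{
	dma_addr_t handle;

	/* Old-style PCI wrapper... */
	handle = pci_map_single(pdev, ptr, len, PCI_DMA_TODEVICE);
	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);

	/* ...now equivalent to the generic form: */
	handle = dma_map_single(&pdev->dev, ptr, len, DMA_TO_DEVICE);
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
}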