Commit 622a9edd authored by Ralf Baechle, committed by Linus Torvalds

Remove dma_cache_(wback|inv|wback_inv) functions

dma_cache_(wback|inv|wback_inv) were the earliest attempt at a generalized
cache management API for I/O purposes.  Originally it was basically the raw
MIPS low-level cache API exported to the entire world.  The API has
suffered from a lack of documentation, was not very widely used unlike its
more modern brothers, and can easily be replaced by dma_cache_sync.  So
remove it, or rather turn the surviving bits back into an arch-private API,
as discussed on linux-arch.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Paul Mackerras <paulus@samba.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Acked-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bc154b1e
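
For driver authors the conversion is mechanical.  A minimal before/after
sketch (hypothetical noncoherent-platform driver code; dma_cache_sync() and
the dma_data_direction constants are the generic API this commit
standardizes on):

	#include <linux/dma-mapping.h>

	/* Hypothetical helper: make a buffer safe for the device to read.
	 * Before this commit: dma_cache_wback((unsigned long)buf, len);
	 * after it, the portable spelling is:
	 */
	static void prepare_tx_buffer(struct device *dev, void *buf, size_t len)
	{
		dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);	/* writeback before DMA */
	}

On cache-coherent architectures dma_cache_sync() degenerates to a no-op (or
a write-buffer flush), so the call costs nothing where it is not needed.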
@@ -21,13 +21,13 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dma_cache_inv(vaddr, size);
+		invalidate_dcache_region(vaddr, size);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dma_cache_wback(vaddr, size);
+		clean_dcache_region(vaddr, size);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dma_cache_wback_inv(vaddr, size);
+		flush_dcache_region(vaddr, size);
 		break;
 	default:
 		BUG();
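
The switch above is the whole contract of the replacement API; a hedged
usage sketch (dev, rx_buf, tx_buf and the sizes are hypothetical):

	/* DMA_FROM_DEVICE: the device is about to overwrite RAM, so stale
	 * CPU cache lines over the buffer are discarded (invalidate only);
	 * writing them back would clobber the incoming DMA data.
	 * DMA_TO_DEVICE: dirty CPU lines must reach RAM first (writeback
	 * only).  DMA_BIDIRECTIONAL: both hazards apply, so writeback and
	 * invalidate. */
	dma_cache_sync(dev, rx_buf, rx_len, DMA_FROM_DEVICE);	/* before device writes */
	dma_cache_sync(dev, tx_buf, tx_len, DMA_TO_DEVICE);	/* before device reads */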
@@ -47,8 +47,6 @@ void (*_dma_cache_wback)(unsigned long start, unsigned long size);
 void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 EXPORT_SYMBOL(_dma_cache_wback_inv);
-EXPORT_SYMBOL(_dma_cache_wback);
-EXPORT_SYMBOL(_dma_cache_inv);
 #endif /* CONFIG_DMA_NONCOHERENT */
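
The MIPS _dma_cache_* symbols are function pointers, filled in at
cache-probe time with routines for the detected CPU; unexporting them
confines that dispatch to arch code.  Roughly (illustrative wiring; the
r4k_* names stand in for the real per-CPU implementations):

	static void __init setup_dma_cache_ops(void)
	{
		_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
		_dma_cache_wback = r4k_dma_cache_wback_inv;	/* wback+inv is a safe superset */
		_dma_cache_inv = r4k_dma_cache_inv;
	}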
@@ -51,7 +51,7 @@ void *dreamcast_consistent_alloc(struct device *dev, size_t size,
 	buf = P2SEGADDR(buf);
 	/* Flush the dcache before we hand off the buffer */
-	dma_cache_wback_inv((void *)buf, size);
+	__flush_purge_region((void *)buf, size);
 	return (void *)buf;
 }
@@ -34,7 +34,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
 	/*
 	 * We must flush the cache before we pass it on to the device
 	 */
-	dma_cache_wback_inv(ret, size);
+	__flush_purge_region(ret, size);
 	page = virt_to_page(ret);
 	free = page + (size >> PAGE_SHIFT);
@@ -68,13 +68,13 @@ void consistent_sync(void *vaddr, size_t size, int direction)
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dma_cache_inv(p1addr, size);
+		__flush_invalidate_region(p1addr, size);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dma_cache_wback(p1addr, size);
+		__flush_wback_region(p1addr, size);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dma_cache_wback_inv(p1addr, size);
+		__flush_purge_region(p1addr, size);
 		break;
 	default:
 		BUG();
@@ -11,6 +11,7 @@
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <asm/io.h>
@@ -32,7 +33,7 @@ void *consistent_alloc(struct pci_dev *hwdev, size_t size,
 	if (vp != NULL) {
 		memset(vp, 0, size);
 		*dma_handle = virt_to_phys(ret);
-		dma_cache_wback_inv((unsigned long)ret, size);
+		dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
 	}
 	return vp;
@@ -551,12 +551,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
 #endif
 #define RTC_ALWAYS_BCD	0
-/* Nothing to do */
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
 /*
  * Some mucking forons use if[n]def writeq to check if platform has it.
  * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
@@ -298,13 +298,6 @@ extern void __iounmap(void __iomem *addr);
 #define ioport_map(port, nr)	ioremap(port, nr)
 #define ioport_unmap(port)	iounmap(port)
-#define dma_cache_wback_inv(_start, _size)	\
-	flush_dcache_region(_start, _size)
-#define dma_cache_inv(_start, _size)	\
-	invalidate_dcache_region(_start, _size)
-#define dma_cache_wback(_start, _size)	\
-	clean_dcache_region(_start, _size)
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -183,10 +183,6 @@ extern void blkfin_inv_cache_all(void);
 #define ioport_map(port, nr)	((void __iomem *)(port))
 #define ioport_unmap(addr)
-#define dma_cache_inv(_start,_size)		do { blkfin_inv_cache_all(); } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { blkfin_inv_cache_all(); } while (0)
 /* Pages to physical address... */
 #define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
 #define page_to_bus(page)	((page - mem_map) << PAGE_SHIFT)
@@ -264,12 +264,6 @@ static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size
 extern void iounmap(void *addr);
-/* Nothing to do */
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
 /* H8/300 internal I/O functions */
 static __inline__ unsigned char ctrl_inb(unsigned long addr)
 {
@@ -435,10 +435,6 @@ extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
 extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
 extern void memset_io(volatile void __iomem *s, int c, long n);
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
 # endif /* __KERNEL__ */
 /*
@@ -384,12 +384,6 @@ static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
 	return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }
-/* m68k caches aren't DMA coherent */
-extern void dma_cache_wback_inv(unsigned long start, unsigned long size);
-extern void dma_cache_wback(unsigned long start, unsigned long size);
-extern void dma_cache_inv(unsigned long start, unsigned long size);
 static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
 {
 	__builtin_memset((void __force *) addr, val, count);
@@ -165,12 +165,6 @@ static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size
 extern void iounmap(void *addr);
-/* Nothing to do */
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
 /* Pages to physical address... */
 #define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
 #define page_to_bus(page)	((page - mem_map) << PAGE_SHIFT)
@@ -554,6 +554,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
  * caches. Dirty lines of the caches may be written back or simply
  * be discarded. This operation is necessary before dma operations
  * to the memory.
+ *
+ * This API used to be exported; it now is for arch code internal use only.
  */
 #ifdef CONFIG_DMA_NONCOHERENT
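
With the exports gone, only MIPS arch code (its noncoherent dma-mapping
implementation) reaches these hooks; a hedged sketch of that internal use
(function name hypothetical):

	static void mips_dma_sync_for_device(unsigned long addr, unsigned long size,
					     enum dma_data_direction dir)
	{
		switch (dir) {
		case DMA_FROM_DEVICE:	/* device writes RAM: drop stale lines */
			_dma_cache_inv(addr, size);
			break;
		case DMA_TO_DEVICE:	/* device reads RAM: push dirty lines out */
			_dma_cache_wback(addr, size);
			break;
		default:		/* both directions */
			_dma_cache_wback_inv(addr, size);
			break;
		}
	}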
@@ -270,11 +270,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
 /* IO Port space is : BBiiii where BB is HBA number. */
 #define IO_SPACE_LIMIT 0x00ffffff
-#define dma_cache_inv(_start,_size)		do { flush_kernel_dcache_range(_start,_size); } while (0)
-#define dma_cache_wback(_start,_size)		do { flush_kernel_dcache_range(_start,_size); } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { flush_kernel_dcache_range(_start,_size); } while (0)
 /* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32
  * bit mode and from 0xfffffffff0000000-0xfffffffffffffff in 64 bit
  * mode (essentially just sign extending.  This macro takes in a 32
@@ -498,23 +498,6 @@ static inline void name at					\
 #define writeq writeq
 #endif
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_cache_inv(_start,_size) \
-	invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
-	clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
-	flush_dcache_range(_start, (_start + _size))
-#else /* CONFIG_NOT_COHERENT_CACHE */
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-#endif /* !CONFIG_NOT_COHERENT_CACHE */
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -478,23 +478,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 #include <asm/mpc8260_pci9.h>
 #endif
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_cache_inv(_start,_size) \
-	invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
-	clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
-	flush_dcache_range(_start, (_start + _size))
-#else
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-#endif
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -213,7 +213,7 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
 	}
 #endif
-	dma_cache_wback_inv(addr, size);
+	__flush_purge_region(addr, size);
 	/* actual, physical DMA */
 	doing_pdma = 0;
@@ -326,31 +326,6 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 #define iounmap(addr)					\
 	__iounmap((addr))
-/*
- * The caches on some architectures aren't dma-coherent and have need to
- * handle this in software.  There are three types of operations that
- * can be applied to dma buffers.
- *
- *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
- *    writing the content of the caches back to memory, if necessary.
- *    The function also invalidates the affected part of the caches as
- *    necessary before DMA transfers from outside to memory.
- *  - dma_cache_inv(start, size) invalidates the affected parts of the
- *    caches.  Dirty lines of the caches may be written back or simply
- *    be discarded.  This operation is necessary before dma operations
- *    to the memory.
- *  - dma_cache_wback(start, size) writes back any dirty lines but does
- *    not invalidate the cache.  This can be used before DMA reads from
- *    memory,
- */
-#define dma_cache_wback_inv(_start,_size)	\
-	__flush_purge_region(_start,_size)
-#define dma_cache_inv(_start,_size)		\
-	__flush_invalidate_region(_start,_size)
-#define dma_cache_wback(_start,_size)		\
-	__flush_wback_region(_start,_size)
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -42,7 +42,11 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
-	dma_cache_wback_inv((unsigned long)vaddr, size);
+	unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
+	unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;
+
+	for (; s <= e; s += L1_CACHE_BYTES)
+		asm volatile ("ocbp %0, 0" : : "r" (s));
 }
 static inline dma_addr_t dma_map_single(struct device *dev,
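
The replacement body purges every cache line the buffer touches.  Assuming
L1_CACHE_ALIGN_MASK is ~(L1_CACHE_BYTES - 1) (an assumption; check the sh64
headers), a worked example with 32-byte lines:

	/* buffer at vaddr = 0x1004, size = 0x30 -> bytes 0x1004..0x1033 */
	unsigned long s = 0x1004UL & ~0x1fUL;		/* 0x1000: line of the first byte */
	unsigned long e = (0x1004UL + 0x30) & ~0x1fUL;	/* 0x1020: line of the last byte  */
	/* the inclusive loop (s <= e) purges lines 0x1000 and 0x1020 --
	 * exactly the buffer's span; a fully line-aligned buffer gets one
	 * extra trailing line purged, which is harmless for ocbp
	 * (writeback + invalidate). */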
@@ -181,54 +181,6 @@ extern void iounmap(void *addr);
 unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name);
 extern void onchip_unmap(unsigned long vaddr);
-/*
- * The caches on some architectures aren't dma-coherent and have need to
- * handle this in software.  There are three types of operations that
- * can be applied to dma buffers.
- *
- *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
- *    writing the content of the caches back to memory, if necessary.
- *    The function also invalidates the affected part of the caches as
- *    necessary before DMA transfers from outside to memory.
- *  - dma_cache_inv(start, size) invalidates the affected parts of the
- *    caches.  Dirty lines of the caches may be written back or simply
- *    be discarded.  This operation is necessary before dma operations
- *    to the memory.
- *  - dma_cache_wback(start, size) writes back any dirty lines but does
- *    not invalidate the cache.  This can be used before DMA reads from
- *    memory,
- */
-static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
-{
-	unsigned long s = start & L1_CACHE_ALIGN_MASK;
-	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-	for (; s <= e; s += L1_CACHE_BYTES)
-		asm volatile ("ocbp %0, 0" : : "r" (s));
-}
-static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
-{
-	// Note that caller has to be careful with overzealous
-	// invalidation should there be partial cache lines at the extremities
-	// of the specified range
-	unsigned long s = start & L1_CACHE_ALIGN_MASK;
-	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-	for (; s <= e; s += L1_CACHE_BYTES)
-		asm volatile ("ocbi %0, 0" : : "r" (s));
-}
-static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
-{
-	unsigned long s = start & L1_CACHE_ALIGN_MASK;
-	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
-
-	for (; s <= e; s += L1_CACHE_BYTES)
-		asm volatile ("ocbwb %0, 0" : : "r" (s));
-}
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
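
The removed dma_cache_inv() carried a warning about partial cache lines; a
hedged illustration of the hazard (layout hypothetical, 32-byte lines
assumed):

	char pool[64];			/* two 32-byte cache lines */
	char *dma_buf = pool + 16;	/* unaligned DMA buffer: pool[16..47] */
	/* dma_cache_inv((unsigned long)dma_buf, 32) rounds outward to whole
	 * lines, so ocbi also discards pool[0..15] and pool[48..63] --
	 * bytes the CPU may still own dirty.  Hence the rule: align
	 * noncoherent DMA buffers to L1_CACHE_BYTES, or use the
	 * writeback+invalidate variant (ocbp), which cannot lose data at
	 * the edges. */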
@@ -310,13 +310,6 @@ extern void sbus_iounmap(volatile void __iomem *vaddr, unsigned long size);
 #define RTC_PORT(x)	(rtc_port + (x))
 #define RTC_ALWAYS_BCD	0
-/* Nothing to do */
-/* P3: Only IDE DMA may need these. XXX Verify that it still does... */
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
 #endif
 #define __ARCH_HAS_NO_PAGE_ZERO_MAPPED		1
@@ -474,12 +474,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
 #define sbus_iounmap(__addr, __size)	\
 	release_region((unsigned long)(__addr), (__size))
-/* Nothing to do */
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -237,18 +237,9 @@ static inline void flush_write_buffers(void)
 	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
 }
-#define dma_cache_inv(_start,_size)		flush_write_buffers()
-#define dma_cache_wback(_start,_size)		flush_write_buffers()
-#define dma_cache_wback_inv(_start,_size)	flush_write_buffers()
 #else
-/* Nothing to do */
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-#define flush_write_buffers()
+#define flush_write_buffers() do { } while (0)
 #endif
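
Beyond deleting the dma_cache_* stubs, the i386 hunk also tightens the
leftover no-op: a bare '#define flush_write_buffers()' expands to nothing,
while the do/while form behaves like a real void function call.  A small
sketch of why that matters:

	#define flush_write_buffers() do { } while (0)	/* the post-commit stub */

	/* With an empty expansion, a missing semicolon such as
	 *	flush_write_buffers()
	 *	count = 0;
	 * compiles silently (the call just vanishes).  With the do/while
	 * form the same typo is a syntax error, and the macro is always
	 * exactly one statement, safe inside unbraced if/else. */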
@@ -249,12 +249,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
  */
 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-/* Nothing to do */
-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
 #define flush_write_buffers()
 extern int iommu_bio_merge;