Commit cea79535 authored by Matthew Wilcox, committed by Linus Torvalds

[PATCH] PA-RISC: Sort out io accessors

 - sparse annotations for ioremap/iounmap (Randolph Chung)
 - Turn gsc_readb, __raw_readb and readb functions into static inline
   functions (Matthew Wilcox)
 - Document the difference between the gsc_readb, __raw_readb and readb
   families of functions (Matthew Wilcox)
 - Add a debugging option to determine when they are being used incorrectly
   (Matthew Wilcox)
 - Make memcpy_fromio's second argument const (Matthew Wilcox)
Signed-off-by: Matthew Wilcox <willy@parisc-linux.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent ad548e8b
......@@ -87,9 +87,9 @@ EXPORT_SYMBOL($global$);
#include <asm/io.h>
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__memcpy_toio);
EXPORT_SYMBOL(__memcpy_fromio);
EXPORT_SYMBOL(__memset_io);
EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
EXPORT_SYMBOL(memset_io);
#include <asm/unistd.h>
EXPORT_SYMBOL(sys_open);
......
......@@ -15,24 +15,24 @@
* Assumes the device can cope with 32-bit transfers. If it can't,
* don't use this function.
*/
void __memcpy_toio(unsigned long dest, unsigned long src, int count)
void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
if ((dest & 3) != (src & 3))
if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
goto bytecopy;
while (dest & 3) {
writeb(*(char *)src, dest++);
while ((unsigned long)dst & 3) {
writeb(*(char *)src, dst++);
src++;
count--;
}
while (count > 3) {
__raw_writel(*(u32 *)src, dest);
__raw_writel(*(u32 *)src, dst);
src += 4;
dest += 4;
dst += 4;
count -= 4;
}
bytecopy:
while (count--) {
writeb(*(char *)src, dest++);
writeb(*(char *)src, dst++);
src++;
}
}
......@@ -50,51 +50,51 @@ void __memcpy_toio(unsigned long dest, unsigned long src, int count)
** Minimize total number of transfers at cost of CPU cycles.
** TODO: only look at src alignment and adjust the stores to dest.
*/
void __memcpy_fromio(unsigned long dest, unsigned long src, int count)
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
/* first compare alignment of src/dst */
if ( ((dest ^ src) & 1) || (count < 2) )
if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
goto bytecopy;
if ( ((dest ^ src) & 2) || (count < 4) )
if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
goto shortcopy;
/* Then check for misaligned start address */
if (src & 1) {
*(u8 *)dest = readb(src);
if ((unsigned long)src & 1) {
*(u8 *)dst = readb(src);
src++;
dest++;
dst++;
count--;
if (count < 2) goto bytecopy;
}
if (src & 2) {
*(u16 *)dest = __raw_readw(src);
if ((unsigned long)src & 2) {
*(u16 *)dst = __raw_readw(src);
src += 2;
dest += 2;
dst += 2;
count -= 2;
}
while (count > 3) {
*(u32 *)dest = __raw_readl(src);
dest += 4;
*(u32 *)dst = __raw_readl(src);
dst += 4;
src += 4;
count -= 4;
}
shortcopy:
while (count > 1) {
*(u16 *)dest = __raw_readw(src);
*(u16 *)dst = __raw_readw(src);
src += 2;
dest += 2;
dst += 2;
count -= 2;
}
bytecopy:
while (count--) {
*(char *)dest = readb(src);
*(char *)dst = readb(src);
src++;
dest++;
dst++;
}
}
......@@ -102,20 +102,20 @@ void __memcpy_fromio(unsigned long dest, unsigned long src, int count)
* Assumes the device can cope with 32-bit transfers. If it can't,
* don't use this function.
*/
void __memset_io(unsigned long dest, char fill, int count)
void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
u32 fill32 = (fill << 24) | (fill << 16) | (fill << 8) | fill;
while (dest & 3) {
writeb(fill, dest++);
u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
while ((unsigned long)addr & 3) {
writeb(val, addr++);
count--;
}
while (count > 3) {
__raw_writel(fill32, dest);
dest += 4;
__raw_writel(val32, addr);
addr += 4;
count -= 4;
}
while (count--) {
writeb(fill, dest++);
writeb(val, addr++);
}
}
......
......@@ -11,6 +11,7 @@
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
......@@ -94,6 +95,30 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
}
#endif /* USE_HPPA_IOREMAP */
#ifdef CONFIG_DEBUG_IOREMAP
static unsigned long last = 0;
void gsc_bad_addr(unsigned long addr)
{
if (time_after(jiffies, last + HZ*10)) {
printk("gsc_foo() called with bad address 0x%lx\n", addr);
dump_stack();
last = jiffies;
}
}
EXPORT_SYMBOL(gsc_bad_addr);
void __raw_bad_addr(const volatile void __iomem *addr)
{
if (time_after(jiffies, last + HZ*10)) {
printk("__raw_foo() called with bad address 0x%p\n", addr);
dump_stack();
last = jiffies;
}
}
EXPORT_SYMBOL(__raw_bad_addr);
#endif
/*
* Generic mapping function (not visible outside):
*/
......@@ -107,7 +132,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
#if !(USE_HPPA_IOREMAP)
......@@ -118,7 +143,11 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
phys_addr |= 0xfc000000;
}
return (void *)phys_addr;
#ifdef CONFIG_DEBUG_IOREMAP
return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
#else
return (void __iomem *)phys_addr;
#endif
#else
void * addr;
......@@ -163,16 +192,16 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
vfree(addr);
return NULL;
}
return (void *) (offset + (char *)addr);
return (void __iomem *) (offset + (char *)addr);
#endif
}
void iounmap(void *addr)
void iounmap(void __iomem *addr)
{
#if !(USE_HPPA_IOREMAP)
return;
#else
if (addr > high_memory)
return vfree((void *) (PAGE_MASK & (unsigned long) addr));
return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
#endif
}
#ifndef _ASM_IO_H
#define _ASM_IO_H
/* USE_HPPA_IOREMAP IS THE MAGIC FLAG TO ENABLE OR DISABLE REAL IOREMAP() FUNCTIONALITY */
/* FOR 712 or 715 MACHINES THIS SHOULD BE ENABLED,
NEWER MACHINES STILL HAVE SOME ISSUES IN THE SCSI AND/OR NETWORK DRIVERS AND
BECAUSE OF THAT I WILL LEAVE IT DISABLED FOR NOW <deller@gmx.de> */
/* WHEN THOSE ISSUES ARE SOLVED, USE_HPPA_IOREMAP WILL GO AWAY */
#define USE_HPPA_IOREMAP 0
#include <linux/config.h>
#include <linux/types.h>
#include <asm/pgtable.h>
......@@ -24,38 +16,44 @@ extern unsigned long parisc_vmerge_max_size;
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/* Memory mapped IO */
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
extern inline void * ioremap(unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, 0);
}
/*
* This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
* Memory mapped I/O
*
* readX()/writeX() do byteswapping and take an ioremapped address
* __raw_readX()/__raw_writeX() don't byteswap and take an ioremapped address.
* gsc_*() don't byteswap and operate on physical addresses;
* eg dev->hpa or 0xfee00000.
*/
extern inline void * ioremap_nocache(unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, _PAGE_NO_CACHE /* _PAGE_PCD */);
}
extern void iounmap(void *addr);
#ifdef CONFIG_DEBUG_IOREMAP
#ifdef CONFIG_64BIT
#define NYBBLE_SHIFT 60
#else
#define NYBBLE_SHIFT 28
#endif
extern void gsc_bad_addr(unsigned long addr);
extern void __raw_bad_addr(const volatile void __iomem *addr);
#define gsc_check_addr(addr) \
if ((addr >> NYBBLE_SHIFT) != 0xf) { \
gsc_bad_addr(addr); \
addr |= 0xfUL << NYBBLE_SHIFT; \
}
#define __raw_check_addr(addr) \
if (((unsigned long)addr >> NYBBLE_SHIFT) != 0xe) \
__raw_bad_addr(addr); \
addr = (void *)((unsigned long)addr | (0xfUL << NYBBLE_SHIFT));
#else
#define gsc_check_addr(addr)
#define __raw_check_addr(addr)
#endif
/*
* __raw_ variants have no defined meaning. on hppa, it means `i was
* too lazy to ioremap first'. kind of like isa_, except that there's
* no additional base address to add on.
*/
#define __raw_readb(a) ___raw_readb((unsigned long)(a))
extern __inline__ unsigned char ___raw_readb(unsigned long addr)
static inline unsigned char gsc_readb(unsigned long addr)
{
long flags;
unsigned char ret;
gsc_check_addr(addr);
__asm__ __volatile__(
" rsm 2,%0\n"
" ldbx 0(%2),%1\n"
......@@ -65,12 +63,13 @@ extern __inline__ unsigned char ___raw_readb(unsigned long addr)
return ret;
}
#define __raw_readw(a) ___raw_readw((unsigned long)(a))
extern __inline__ unsigned short ___raw_readw(unsigned long addr)
static inline unsigned short gsc_readw(unsigned long addr)
{
long flags;
unsigned short ret;
gsc_check_addr(addr);
__asm__ __volatile__(
" rsm 2,%0\n"
" ldhx 0(%2),%1\n"
......@@ -80,11 +79,12 @@ extern __inline__ unsigned short ___raw_readw(unsigned long addr)
return ret;
}
#define __raw_readl(a) ___raw_readl((unsigned long)(a))
extern __inline__ unsigned int ___raw_readl(unsigned long addr)
static inline unsigned int gsc_readl(unsigned long addr)
{
u32 ret;
gsc_check_addr(addr);
__asm__ __volatile__(
" ldwax 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
......@@ -92,26 +92,28 @@ extern __inline__ unsigned int ___raw_readl(unsigned long addr)
return ret;
}
#define __raw_readq(a) ___raw_readq((unsigned long)(a))
extern __inline__ unsigned long long ___raw_readq(unsigned long addr)
static inline unsigned long long gsc_readq(unsigned long addr)
{
unsigned long long ret;
gsc_check_addr(addr);
#ifdef __LP64__
__asm__ __volatile__(
" ldda 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
#else
/* two reads may have side effects.. */
ret = ((u64) __raw_readl(addr)) << 32;
ret |= __raw_readl(addr+4);
ret = ((u64) gsc_readl(addr)) << 32;
ret |= gsc_readl(addr+4);
#endif
return ret;
}
#define __raw_writeb(a,b) ___raw_writeb(a, (unsigned long)(b))
extern __inline__ void ___raw_writeb(unsigned char val, unsigned long addr)
static inline void gsc_writeb(unsigned char val, unsigned long addr)
{
long flags;
gsc_check_addr(addr);
__asm__ __volatile__(
" rsm 2,%0\n"
" stbs %1,0(%2)\n"
......@@ -119,10 +121,11 @@ extern __inline__ void ___raw_writeb(unsigned char val, unsigned long addr)
: "=&r" (flags) : "r" (val), "r" (addr) );
}
#define __raw_writew(a,b) ___raw_writew(a, (unsigned long)(b))
extern __inline__ void ___raw_writew(unsigned short val, unsigned long addr)
static inline void gsc_writew(unsigned short val, unsigned long addr)
{
long flags;
gsc_check_addr(addr);
__asm__ __volatile__(
" rsm 2,%0\n"
" sths %1,0(%2)\n"
......@@ -130,88 +133,180 @@ extern __inline__ void ___raw_writew(unsigned short val, unsigned long addr)
: "=&r" (flags) : "r" (val), "r" (addr) );
}
#define __raw_writel(a,b) ___raw_writel(a, (unsigned long)(b))
extern __inline__ void ___raw_writel(unsigned int val, unsigned long addr)
static inline void gsc_writel(unsigned int val, unsigned long addr)
{
gsc_check_addr(addr);
__asm__ __volatile__(
" stwas %0,0(%1)\n"
: : "r" (val), "r" (addr) );
}
#define __raw_writeq(a,b) ___raw_writeq(a, (unsigned long)(b))
extern __inline__ void ___raw_writeq(unsigned long long val, unsigned long addr)
static inline void gsc_writeq(unsigned long long val, unsigned long addr)
{
gsc_check_addr(addr);
#ifdef __LP64__
__asm__ __volatile__(
" stda %0,0(%1)\n"
: : "r" (val), "r" (addr) );
#else
/* two writes may have side effects.. */
__raw_writel(val >> 32, addr);
__raw_writel(val, addr+4);
gsc_writel(val >> 32, addr);
gsc_writel(val, addr+4);
#endif
}
/*
* The standard PCI ioremap interfaces
*/
extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
extern inline void __iomem * ioremap(unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, 0);
}
/*
* This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern inline void * ioremap_nocache(unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, _PAGE_NO_CACHE /* _PAGE_PCD */);
}
extern void iounmap(void __iomem *addr);
/*
* USE_HPPA_IOREMAP is the magic flag to enable or disable real ioremap()
* functionality. It's currently disabled because it may not work on some
* machines.
*/
#define USE_HPPA_IOREMAP 0
#if USE_HPPA_IOREMAP
#define readb(addr) (*(volatile unsigned char *) (addr))
#define readw(addr) (*(volatile unsigned short *) (addr))
#define readl(addr) (*(volatile unsigned int *) (addr))
#define readq(addr) (*(volatile u64 *) (addr))
#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#define writeq(b,addr) (*(volatile u64 *) (addr) = (b))
static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
return (*(volatile unsigned char __force *) (addr));
}
static inline unsigned short __raw_readw(const volatile void __iomem *addr)
{
return *(volatile unsigned short __force *) addr;
}
static inline unsigned int __raw_readl(const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
static inline unsigned long long __raw_readq(const volatile void __iomem *addr)
{
return *(volatile unsigned long long __force *) addr;
}
static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr)
{
*(volatile unsigned char __force *) addr = b;
}
static inline void __raw_writew(unsigned short b, volatile void __iomem *addr)
{
*(volatile unsigned short __force *) addr = b;
}
static inline void __raw_writel(unsigned int b, volatile void __iomem *addr)
{
*(volatile unsigned int __force *) addr = b;
}
static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr)
{
*(volatile unsigned long long __force *) addr = b;
}
#else /* !USE_HPPA_IOREMAP */
static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
__raw_check_addr(addr);
return gsc_readb((unsigned long) addr);
}
static inline unsigned short __raw_readw(const volatile void __iomem *addr)
{
__raw_check_addr(addr);
return gsc_readw((unsigned long) addr);
}
static inline unsigned int __raw_readl(const volatile void __iomem *addr)
{
__raw_check_addr(addr);
return gsc_readl((unsigned long) addr);
}
static inline unsigned long long __raw_readq(const volatile void __iomem *addr)
{
__raw_check_addr(addr);
return gsc_readq((unsigned long) addr);
}
static inline void __raw_writeb(unsigned char b, volatile void __iomem *addr)
{
__raw_check_addr(addr);
gsc_writeb(b, (unsigned long) addr);
}
static inline void __raw_writew(unsigned short b, volatile void __iomem *addr)
{
__raw_check_addr(addr);
gsc_writew(b, (unsigned long) addr);
}
static inline void __raw_writel(unsigned int b, volatile void __iomem *addr)
{
__raw_check_addr(addr);
gsc_writel(b, (unsigned long) addr);
}
static inline void __raw_writeq(unsigned long long b, volatile void __iomem *addr)
{
__raw_check_addr(addr);
gsc_writeq(b, (unsigned long) addr);
}
#endif /* !USE_HPPA_IOREMAP */
#define readb(addr) __raw_readb(addr)
#define readw(addr) le16_to_cpu(__raw_readw(addr))
#define readl(addr) le32_to_cpu(__raw_readl(addr))
#define readq(addr) le64_to_cpu(__raw_readq(addr))
#define writeb(b,addr) __raw_writeb(b,addr)
#define writew(b,addr) __raw_writew(cpu_to_le16(b),addr)
#define writel(b,addr) __raw_writel(cpu_to_le32(b),addr)
#define writeq(b,addr) __raw_writeq(cpu_to_le64(b),addr)
#endif /* !USE_HPPA_IOREMAP */
#define writeb(b, addr) __raw_writeb(b, addr)
#define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
#define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define readq_relaxed(addr) readq(addr)
#define mmiowb()
extern void __memcpy_fromio(unsigned long dest, unsigned long src, int count);
extern void __memcpy_toio(unsigned long dest, unsigned long src, int count);
extern void __memset_io(unsigned long dest, char fill, int count);
#define mmiowb() do { } while (0)
#define memcpy_fromio(a,b,c) __memcpy_fromio((unsigned long)(a), (unsigned long)(b), (c))
#define memcpy_toio(a,b,c) __memcpy_toio((unsigned long)(a), (unsigned long)(b), (c))
#define memset_io(a,b,c) __memset_io((unsigned long)(a), (b), (c))
void memset_io(volatile void __iomem *addr, unsigned char val, int count);
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
/* Support old drivers which don't ioremap.
* NB this interface is scheduled to disappear in 2.5
*/
#define EISA_BASE 0xfffffffffc000000UL
#define isa_readb(a) readb(EISA_BASE | (a))
#define isa_readw(a) readw(EISA_BASE | (a))
#define isa_readl(a) readl(EISA_BASE | (a))
#define isa_writeb(b,a) writeb((b), EISA_BASE | (a))
#define isa_writew(b,a) writew((b), EISA_BASE | (a))
#define isa_writel(b,a) writel((b), EISA_BASE | (a))
#define isa_memset_io(a,b,c) memset_io(EISA_BASE | (a), (b), (c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a), EISA_BASE | (b), (c))
#define isa_memcpy_toio(a,b,c) memcpy_toio(EISA_BASE | (a), (b), (c))
/*
* These functions support PA-RISC drivers which don't yet call ioremap().
* They will disappear once the last of these drivers is gone.
*/
#define gsc_readb(x) __raw_readb(x)
#define gsc_readw(x) __raw_readw(x)
#define gsc_readl(x) __raw_readl(x)
#define gsc_writeb(x, y) __raw_writeb(x, y)
#define gsc_writew(x, y) __raw_writew(x, y)
#define gsc_writel(x, y) __raw_writel(x, y)
#define __isa_addr(x) (void __iomem *)(F_EXTEND(0xfc000000) | (x))
#define isa_readb(a) readb(__isa_addr(a))
#define isa_readw(a) readw(__isa_addr(a))
#define isa_readl(a) readl(__isa_addr(a))
#define isa_writeb(b,a) writeb((b), __isa_addr(a))
#define isa_writew(b,a) writew((b), __isa_addr(a))
#define isa_writel(b,a) writel((b), __isa_addr(a))
#define isa_memset_io(a,b,c) memset_io(__isa_addr(a), (b), (c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a), __isa_addr(b), (c))
#define isa_memcpy_toio(a,b,c) memcpy_toio(__isa_addr(a), (b), (c))
/*
......
......@@ -117,6 +117,19 @@ config DEBUG_INFO
If you're truly short on disk space or don't expect to report any
bugs back to the UML developers, say N, otherwise say Y.
config DEBUG_IOREMAP
bool "Enable ioremap() debugging"
depends on DEBUG_KERNEL && PARISC
help
Enabling this option will cause the kernel to distinguish between
ioremapped and physical addresses. It will print a backtrace (at
most one every 10 seconds), hopefully allowing you to see which
drivers need work. Fixing all these problems is a prerequisite
for turning on USE_HPPA_IOREMAP. The warnings are harmless;
the kernel has enough information to fix the broken drivers
automatically, but we'd like to make it more efficient by not
having to do that.
config DEBUG_FS
bool "Debug Filesystem"
depends on DEBUG_KERNEL
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment