Commit 1e0b058c authored by Matthew Wilcox, committed by Linus Torvalds

[PATCH] include/asm-parisc

Update include/asm-parisc
parent db299c0d
@@ -27,7 +27,7 @@
	.level 2.0w
#endif

-#include <asm/offset.h>
+#include <asm/offsets.h>
#include <asm/page.h>
#include <asm/asmregs.h>
@@ -36,17 +36,34 @@
	gp	=	27
	ipsw	=	22
-#if __PAGE_OFFSET == 0xc0000000
-	.macro tophys gr
-	zdep \gr, 31, 30, \gr
-	.endm
-	.macro tovirt gr
-	depi 3,1,2,\gr
-	.endm
-#else
-#error unknown __PAGE_OFFSET
-#endif
+	/*
+	 * We provide two versions of each macro to convert from physical
+	 * to virtual and vice versa. The "_r1" versions take one argument
+	 * register, but trash r1 to do the conversion. The other
+	 * versions take two arguments: a source and a destination register.
+	 * However, the source and destination registers cannot be
+	 * the same register.
+	 */
+
+	.macro tophys grvirt, grphys
+	ldil L%(__PAGE_OFFSET), \grphys
+	sub \grvirt, \grphys, \grphys
+	.endm
+
+	.macro tovirt grphys, grvirt
+	ldil L%(__PAGE_OFFSET), \grvirt
+	add \grphys, \grvirt, \grvirt
+	.endm
+
+	.macro tophys_r1 gr
+	ldil L%(__PAGE_OFFSET), %r1
+	sub \gr, %r1, \gr
+	.endm
+
+	.macro tovirt_r1 gr
+	ldil L%(__PAGE_OFFSET), %r1
+	add \gr, %r1, \gr
+	.endm
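In C terms the conversion these macros perform is plain offset arithmetic against __PAGE_OFFSET (ldil only loads the upper bits of the constant, so this relies on __PAGE_OFFSET having no low-order bits set). A standalone sketch, not part of the patch; PAGE_OFFSET_VAL is an assumed stand-in value:

	/* Illustrative C equivalent of the tophys/tovirt macros above. */
	#define PAGE_OFFSET_VAL 0x10000000UL	/* assumed for the sketch */

	static unsigned long tophys_c(unsigned long virt)
	{
		return virt - PAGE_OFFSET_VAL;	/* sub \grvirt, \grphys, \grphys */
	}

	static unsigned long tovirt_c(unsigned long phys)
	{
		return phys + PAGE_OFFSET_VAL;	/* add \grphys, \grvirt, \grvirt */
	}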
	.macro delay value
	ldil L%\value, 1
@@ -59,11 +76,21 @@
	.macro debug value
	.endm
-#ifdef __LP64__
-# define LDIL_FIXUP(reg)	depdi 0,31,32,reg
-#else
-# define LDIL_FIXUP(reg)
-#endif
+	/* Shift Left - note the r and t can NOT be the same! */
+	.macro shl r, sa, t
+	dep,z \r, 31-\sa, 32-\sa, \t
+	.endm
+
+	/* The PA 2.0 shift left */
+	.macro shlw r, sa, t
+	depw,z \r, 31-\sa, 32-\sa, \t
+	.endm
+
+	/* And the PA 2.0W shift left */
+	.macro shld r, sa, t
+	depd,z \r, 63-\sa, 64-\sa, \t
+	.endm
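Each variant deposits the low 32-sa (or 64-sa for shld) bits of r into t, sa positions up, zeroing the rest, which is exactly a logical shift left. Roughly, in C (illustrative only, with sa a compile-time constant):

	/* Illustrative: "shl r, sa, t" deposits r's low (32-sa) bits
	 * at position sa and zeroes the remainder, i.e. t = r << sa. */
	static unsigned int shl_c(unsigned int r, unsigned int sa)
	{
		return r << sa;
	}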
/* load 32-bit 'value' into 'reg' compensating for the ldil
 * sign-extension when running in wide mode.
@@ -72,7 +99,6 @@
	.macro load32 value, reg
	ldil L%\value, \reg
	ldo R%\value(\reg), \reg
-	LDIL_FIXUP(\reg)
	.endm

#ifdef __LP64__
@@ -89,7 +115,6 @@
#ifdef __LP64__
	ldil L%__gp, %r27
	ldo R%__gp(%r27), %r27
-	LDIL_FIXUP(%r27)
#else
	ldil L%$global$, %r27
	ldo R%$global$(%r27), %r27
@@ -102,6 +127,7 @@
#define REST_CR(r, where) LDREG where, %r1 ! mtctl %r1, r

	.macro save_general regs
+	STREG %r1, PT_GR1 (\regs)
	STREG %r2, PT_GR2 (\regs)
	STREG %r3, PT_GR3 (\regs)
	STREG %r4, PT_GR4 (\regs)
@@ -126,15 +152,16 @@
	STREG %r23, PT_GR23(\regs)
	STREG %r24, PT_GR24(\regs)
	STREG %r25, PT_GR25(\regs)
-	/* r26 is clobbered by cr19 and assumed to be saved before hand */
+	/* r26 is saved in get_stack and used to preserve a value across virt_map */
	STREG %r27, PT_GR27(\regs)
	STREG %r28, PT_GR28(\regs)
-	/* r29 is already saved and points to PT_xxx struct */
+	/* r29 is saved in get_stack and used to point to saved registers */
	/* r30 stack pointer saved in get_stack */
	STREG %r31, PT_GR31(\regs)
	.endm

	.macro rest_general regs
+	/* r1 used as a temp in rest_stack and is restored there */
	LDREG PT_GR2 (\regs), %r2
	LDREG PT_GR3 (\regs), %r3
	LDREG PT_GR4 (\regs), %r4
@@ -162,6 +189,7 @@
	LDREG PT_GR26(\regs), %r26
	LDREG PT_GR27(\regs), %r27
	LDREG PT_GR28(\regs), %r28
+	/* r29 points to register save area, and is restored in rest_stack */
	/* r30 stack pointer restored in rest_stack */
	LDREG PT_GR31(\regs), %r31
	.endm
@@ -238,8 +266,8 @@
#ifdef __LP64__
	.macro callee_save
-	ldo 144(%r30), %r30
-	std %r3, -144(%r30)
+	std,ma %r3, 144(%r30)
+	mfctl %cr27, %r3
	std %r4, -136(%r30)
	std %r5, -128(%r30)
	std %r6, -120(%r30)
@@ -255,9 +283,11 @@
	std %r16, -40(%r30)
	std %r17, -32(%r30)
	std %r18, -24(%r30)
+	std %r3, -16(%r30)
	.endm

	.macro callee_rest
+	ldd -16(%r30), %r3
	ldd -24(%r30), %r18
	ldd -32(%r30), %r17
	ldd -40(%r30), %r16
@@ -273,52 +303,54 @@
	ldd -120(%r30), %r6
	ldd -128(%r30), %r5
	ldd -136(%r30), %r4
-	ldd -144(%r30), %r3
-	ldo -144(%r30), %r30
+	mtctl %r3, %cr27
+	ldd,mb -144(%r30), %r3
	.endm
-#else /* __LP64__ */
+#else /* ! __LP64__ */

	.macro callee_save
-	ldo 128(30), 30
-	stw 3, -128(30)
-	stw 4, -124(30)
-	stw 5, -120(30)
-	stw 6, -116(30)
-	stw 7, -112(30)
-	stw 8, -108(30)
-	stw 9, -104(30)
-	stw 10, -100(30)
-	stw 11, -96(30)
-	stw 12, -92(30)
-	stw 13, -88(30)
-	stw 14, -84(30)
-	stw 15, -80(30)
-	stw 16, -76(30)
-	stw 17, -72(30)
-	stw 18, -68(30)
+	stw,ma %r3, 128(%r30)
+	mfctl %cr27, %r3
+	stw %r4, -124(%r30)
+	stw %r5, -120(%r30)
+	stw %r6, -116(%r30)
+	stw %r7, -112(%r30)
+	stw %r8, -108(%r30)
+	stw %r9, -104(%r30)
+	stw %r10, -100(%r30)
+	stw %r11, -96(%r30)
+	stw %r12, -92(%r30)
+	stw %r13, -88(%r30)
+	stw %r14, -84(%r30)
+	stw %r15, -80(%r30)
+	stw %r16, -76(%r30)
+	stw %r17, -72(%r30)
+	stw %r18, -68(%r30)
+	stw %r3, -64(%r30)
	.endm

	.macro callee_rest
-	ldw -68(30), 18
-	ldw -72(30), 17
-	ldw -76(30), 16
-	ldw -80(30), 15
-	ldw -84(30), 14
-	ldw -88(30), 13
-	ldw -92(30), 12
-	ldw -96(30), 11
-	ldw -100(30), 10
-	ldw -104(30), 9
-	ldw -108(30), 8
-	ldw -112(30), 7
-	ldw -116(30), 6
-	ldw -120(30), 5
-	ldw -124(30), 4
-	ldw -128(30), 3
-	ldo -128(30), 30
+	ldw -64(%r30), %r3
+	ldw -68(%r30), %r18
+	ldw -72(%r30), %r17
+	ldw -76(%r30), %r16
+	ldw -80(%r30), %r15
+	ldw -84(%r30), %r14
+	ldw -88(%r30), %r13
+	ldw -92(%r30), %r12
+	ldw -96(%r30), %r11
+	ldw -100(%r30), %r10
+	ldw -104(%r30), %r9
+	ldw -108(%r30), %r8
+	ldw -112(%r30), %r7
+	ldw -116(%r30), %r6
+	ldw -120(%r30), %r5
+	ldw -124(%r30), %r4
+	mtctl %r3, %cr27
+	ldw,mb -128(%r30), %r3
	.endm

-#endif /* __LP64__ */
+#endif /* ! __LP64__ */
	.macro save_specials regs
@@ -339,14 +371,25 @@
	mtctl %r0, %cr18
	SAVE_CR (%cr18, PT_IAOQ1(\regs))

+#ifdef __LP64__
+	/* cr11 (sar) is a funny one. 5 bits on PA1.1 and 6 bits on PA2.0.
+	 * For PA2.0 mtsar or mtctl always write 6 bits, but mfctl only
+	 * reads 5 bits. Use mfctl,w to read all six bits. Otherwise
+	 * we lose the 6th bit on a save/restore over interrupt.
+	 */
+	mfctl,w %cr11, %r1
+	STREG %r1, PT_SAR (\regs)
+#else
	SAVE_CR (%cr11, PT_SAR (\regs))
-	SAVE_CR (%cr22, PT_PSW (\regs))
+#endif
	SAVE_CR (%cr19, PT_IIR (\regs))
-	SAVE_CR (%cr28, PT_GR1 (\regs))
-	SAVE_CR (%cr31, PT_GR29 (\regs))
-	STREG %r26, PT_GR26 (\regs)
-	mfctl %cr29, %r26
+
+	/*
+	 * Code immediately following this macro (in intr_save) relies
+	 * on r8 containing ipsw.
+	 */
+	mfctl %cr22, %r8
+	STREG %r8, PT_PSW(\regs)
	.endm

	.macro rest_specials regs
...
@@ -15,10 +15,12 @@
 */
#ifdef CONFIG_SMP
-/* we have an array of spinlocks for our atomic_ts, and a hash function
- * to get the right index */
-#  define ATOMIC_HASH_SIZE 1
-#  define ATOMIC_HASH(a) (&__atomic_hash[0])
+/* Use an array of spinlocks for our atomic_ts.
+** Hash function to index into a different SPINLOCK.
+** Since "a" is usually an address, ">>8" makes one spinlock per 256-byte block.
+*/
+#  define ATOMIC_HASH_SIZE 4
+#  define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long) a)>>8)&(ATOMIC_HASH_SIZE-1)])

extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
/* copied from <asm/spinlock.h> and modified */
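To illustrate the hashing, here is a standalone userspace sketch (not kernel code) of how ATOMIC_HASH spreads addresses across the four locks: bits 8-9 of the address select the slot, so two atomic_ts 256 or more bytes apart land on different spinlocks.

	#include <stdio.h>

	#define ATOMIC_HASH_SIZE 4
	#define ATOMIC_HASH_IDX(a) ((((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1))

	int main(void)
	{
		static int counters[1024];
		/* counters[0] and counters[64] are 256 bytes apart
		 * (assuming 4-byte int), so they hash to different slots */
		printf("slot %lu vs slot %lu\n",
		       ATOMIC_HASH_IDX(&counters[0]),
		       ATOMIC_HASH_IDX(&counters[64]));
		return 0;
	}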
@@ -44,12 +46,101 @@ extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
- * Cache-line alignment would conflict with, for example, linux/module.h */
+ * Cache-line alignment would conflict with, for example, linux/module.h
+ */

typedef struct {
	volatile int counter;
} atomic_t;
/*
** xchg/cmpxchg moved from asm/system.h - ggg
*/
#if 1
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
#else
static inline void __xchg_called_with_bad_pointer(void)
{
extern void panic(const char * fmt, ...);
panic("xchg called with bad pointer");
}
#endif
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif
/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
int size)
{
switch(size) {
#ifdef __LP64__
case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
case 4: return __xchg32((int) x, (int *) ptr);
case 1: return __xchg8((char) x, (char *) ptr);
}
__xchg_called_with_bad_pointer();
return x;
}
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define __HAVE_ARCH_CMPXCHG 1
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch(size) {
#ifdef __LP64__
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
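A typical caller wraps cmpxchg in a retry loop; a minimal usage sketch (a hypothetical helper, assuming the macros above are in scope):

	/* Illustrative only: atomically increment a counter word
	 * using the cmpxchg() defined above. */
	static __inline__ void atomic_inc_u32(volatile unsigned int *p)
	{
		unsigned int old;
		do {
			old = *p;
		} while (cmpxchg(p, old, old + 1) != old);
	}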
/* It's possible to reduce all atomic operations to either
 * __atomic_add_return, __atomic_set and __atomic_ret (the latter
 * is there only for consistency). */
@@ -100,4 +191,9 @@ static __inline__ int __atomic_read(atomic_t *v)
#define ATOMIC_INIT(i)	{ (i) }

+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
#endif
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

-#include <linux/spinlock.h>
+#include <linux/compiler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>

+/*
+ * HP-PARISC specific bit operations
+ * for a detailed description of the functions please refer
+ * to include/asm-i386/bitops.h or kerneldoc
+ */

#ifdef __LP64__
#   define SHIFT_PER_LONG 6
#ifndef BITS_PER_LONG
@@ -20,6 +26,79 @@
#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
static __inline__ void set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
*addr |= mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
}
static __inline__ void __set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
*addr |= mask;
}
static __inline__ void clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
*addr &= ~mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
}
static __inline__ void __clear_bit(unsigned long nr, volatile void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
*addr &= ~mask;
}
static __inline__ void change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
*addr ^= mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
}
static __inline__ void __change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
*addr ^= mask;
}
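The underscore-prefixed variants above skip the ATOMIC_HASH spinlock and are only safe when the caller already excludes concurrency; a hypothetical caller might use them like this:

	/* Illustrative only */
	static unsigned long flags_map[4];

	static void setup_once(void)		/* single-threaded init path */
	{
		__set_bit(0, flags_map);	/* no lock: nobody else sees the map yet */
	}

	static void mark_event(int nr)		/* may race with other CPUs */
	{
		set_bit(nr, flags_map);		/* takes the hashed spinlock */
	}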
static __inline__ int test_and_set_bit(int nr, void * address)
{
	unsigned long mask;
@@ -28,14 +107,26 @@ static __inline__ int test_and_set_bit(int nr, void * address)
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
+	mask = 1L << CHOP_SHIFTCOUNT(nr);
	SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
+	oldbit = (*addr & mask) ? 1 : 0;
+	*addr |= mask;
+	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
+
+	return oldbit;
+}
+
+static __inline__ int __test_and_set_bit(int nr, void * address)
+{
+	unsigned long mask;
+	unsigned long *addr = (unsigned long *) address;
+	int oldbit;
+
+	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr |= mask;
-	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);

	return oldbit;
}
@@ -47,14 +138,26 @@ static __inline__ int test_and_clear_bit(int nr, void * address)
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
+	mask = 1L << CHOP_SHIFTCOUNT(nr);
	SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
+	oldbit = (*addr & mask) ? 1 : 0;
+	*addr &= ~mask;
+	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
+
+	return oldbit;
+}
+
+static __inline__ int __test_and_clear_bit(int nr, void * address)
+{
+	unsigned long mask;
+	unsigned long *addr = (unsigned long *) address;
+	int oldbit;
+
+	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr &= ~mask;
-	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);

	return oldbit;
}
@@ -66,45 +169,46 @@ static __inline__ int test_and_change_bit(int nr, void * address)
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
-	SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
+	SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr ^= mask;
	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);

	return oldbit;
}

-/* again, the read-only case doesn't have to do any locking */
-static __inline__ int test_bit(int nr, const volatile void *address)
+static __inline__ int __test_and_change_bit(int nr, void * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
+	int oldbit;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
+	oldbit = (*addr & mask) ? 1 : 0;
+	*addr ^= mask;

-	return !!(*addr & mask);
+	return oldbit;
}

-/* sparc does this, other arch's don't -- what's the right answer? XXX */
-#define smp_mb__before_clear_bit()	do { } while(0)
-#define smp_mb__after_clear_bit()	do { } while(0)
-#define set_bit(nr,addr)	((void)test_and_set_bit(nr,addr))
-#define clear_bit(nr,addr)	((void)test_and_clear_bit(nr,addr))
-#define change_bit(nr,addr)	((void)test_and_change_bit(nr,addr))
-/* XXX We'd need some binary search here */
+static __inline__ int test_bit(int nr, const void *address)
+{
+	unsigned long mask;
+	unsigned long *addr = (unsigned long *) address;
+
+	addr += (nr >> SHIFT_PER_LONG);
+	mask = 1L << CHOP_SHIFTCOUNT(nr);
+
+	return !!(*addr & mask);
+}
extern __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result;

	result = 0;
-	while(word & 1) {
+	while (word & 1) {
		result++;
		word >>= 1;
	}
@@ -114,13 +218,40 @@ extern __inline__ unsigned long ffz(unsigned long word)

#ifdef __KERNEL__
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __inline__ unsigned long __ffs(unsigned long word)
{
unsigned long result = 0;
while (!(word & 1UL)) {
result++;
word >>= 1;
}
return result;
}
/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	if (!x)
		return 0;
	/* ffs() is 1-based (man ffs); __ffs() is 0-based */
	return __ffs((unsigned long)x) + 1;
}
/*
 * fls: find last bit set.
 */
-#define ffs(x)	generic_ffs(x)
+#define fls(x)	generic_fls(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
@@ -131,6 +262,35 @@ extern __inline__ unsigned long ffz(unsigned long word)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
#ifndef __LP64__
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
#else
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(((unsigned int)b[1])))
return __ffs(b[1]) + 64;
if (b[1] >> 32)
return __ffs(b[1] >> 32) + 96;
return __ffs(b[2]) + 128;
#endif
}
#endif /* __KERNEL__ */

/*
@@ -175,6 +335,44 @@ static __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long si
	return result + ffz(tmp);
}
static __inline__ unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
{
unsigned long *p = addr + (offset >> 6);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset &= (BITS_PER_LONG-1);
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}
#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
@@ -182,8 +380,13 @@ static __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long si
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 */
+#ifdef __LP64__
+#define ext2_set_bit(nr, addr)		test_and_set_bit((nr) ^ 0x38, addr)
+#define ext2_clear_bit(nr, addr)	test_and_clear_bit((nr) ^ 0x38, addr)
+#else
#define ext2_set_bit(nr, addr)		test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr)	test_and_clear_bit((nr) ^ 0x18, addr)
+#endif
#endif	/* __KERNEL__ */
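The XOR constant converts ext2's little-endian bit numbering into a bit index within the big-endian native word that test_and_set_bit operates on: it inverts the byte-selection bits of nr (bits 3-4 for a 32-bit word, bits 3-5 for 64-bit) while leaving the bit-within-byte bits alone. A standalone check of the 32-bit case (illustrative, not kernel code):

	#include <assert.h>

	int main(void)
	{
		/* ext2 bit nr 0 lives in byte 0 of the bitmap; on big-endian
		 * PA-RISC, byte 0 of a 32-bit word holds word bits 24..31. */
		assert((0 ^ 0x18) == 24);	/* byte 0 -> byte 3 position */
		assert((8 ^ 0x18) == 16);	/* byte 1 -> byte 2 position */
		assert((31 ^ 0x18) == 7);	/* byte 3 -> byte 0 position */
		return 0;
	}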
@@ -239,8 +442,9 @@ extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
}

/* Bitmap functions for the minix filesystem. */
-#define minix_set_bit(nr,addr) ext2_set_bit(nr,addr)
-#define minix_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
+#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
+#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
...
@@ -5,27 +5,18 @@
#ifndef __ARCH_PARISC_CACHE_H
#define __ARCH_PARISC_CACHE_H

+#include <linux/config.h>
+
+#ifndef __ASSEMBLY__
/*
-** XXX FIXME : L1_CACHE_BYTES (cacheline size) should be a boot time thing.
-**
-** 32-bit on PA2.0 is not covered well by the #ifdef __LP64__ below.
-** PA2.0 processors have 64-byte cachelines.
-**
-** The issue is mostly cacheline ping-ponging on SMP boxes.
-** To avoid this, code should define stuff to be per CPU on cacheline
-** aligned boundaries. This can make a 2x or more difference in perf
-** depending on how badly the thrashing is.
-**
-** We don't need to worry about I/O since all PA2.0 boxes (except T600)
-** are I/O coherent. That means flushing less than you needed to generally
-** doesn't matter - the I/O MMU will read/modify/write the cacheline.
-**
-** (Digression: it is possible to program I/O MMU's to not first read
-** a cacheline for inbound data - ie just grab ownership and start writing.
-** While it improves I/O throughput, you gotta know the device driver
-** is well behaved and can deal with the issues.)
-*/
-#if defined(__LP64__)
+ * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
+ * 32-byte cachelines. The default configuration is not for SMP anyway,
+ * so if you're building for SMP, you should select the appropriate
+ * processor type. There is a potential livelock danger when running
+ * a machine with this value set too small, but it's more probable you'll
+ * just ruin performance.
+ */
+#ifdef CONFIG_PA20
#define L1_CACHE_BYTES 64
#else
#define L1_CACHE_BYTES 32
@@ -38,22 +29,47 @@
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))

-extern void init_cache(void);		/* initializes cache-flushing */
-extern void flush_data_cache(void);	/* flushes data-cache only */
-extern void flush_instruction_cache(void);	/* flushes code-cache only */
-extern void flush_all_caches(void);	/* flushes code and data-cache */
+extern void flush_data_cache_local(void);	/* flushes local data-cache only */
+extern void flush_instruction_cache_local(void);	/* flushes local code-cache only */
+#ifdef CONFIG_SMP
+extern void flush_data_cache(void);	/* flushes data-cache only (all processors) */
+#else
+#define flush_data_cache flush_data_cache_local
+#define flush_instruction_cache flush_instruction_cache_local
+#endif
+extern void parisc_cache_init(void);	/* initializes cache-flushing */
+extern void flush_all_caches(void);	/* flush everything (tlb & cache) */
extern int get_cache_info(char *);
+extern void flush_user_icache_range_asm(unsigned long, unsigned long);
+extern void flush_kernel_icache_range_asm(unsigned long, unsigned long);
+extern void flush_user_dcache_range_asm(unsigned long, unsigned long);
+extern void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+extern void flush_kernel_dcache_page(void *);
+extern void flush_kernel_icache_page(void *);
+extern void disable_sr_hashing(void);	/* turns off space register hashing */
+extern void disable_sr_hashing_asm(int);	/* low level support for above */
+extern void free_sid(unsigned long);
+unsigned long alloc_sid(void);

-extern struct pdc_cache_info cache_info;
+struct seq_file;
+extern void show_cache_info(struct seq_file *m);

-#define fdce(addr) asm volatile("fdce 0(%0)" : : "r" (addr))
-#define fice(addr) asm volatile("fice 0(%%sr1,%0)" : : "r" (addr))
+extern int split_tlb;
+extern int dcache_stride;
+extern int icache_stride;
+extern struct pdc_cache_info cache_info;

-#define pdtlbe(addr) asm volatile("pdtlbe 0(%%sr1,%0)" : : "r" (addr))
+#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
+#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));
#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr));
-#define pitlbe(addr) asm volatile("pitlbe 0(%%sr1,%0)" : : "r" (addr))
-#define kernel_fdc(addr) asm volatile("fdc 0(%%sr0, %0)" : : "r" (addr))
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Classes of processor wrt: disabling space register hashing */
+#define SRHASH_PCXST	0	/* pcxs, pcxt, pcxt_ */
+#define SRHASH_PCXL	1	/* pcxl */
+#define SRHASH_PA20	2	/* pcxu, pcxu_, pcxw, pcxw_ */

#endif
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
* Unfortunately, that doesn't apply to PA-RISC. */
/* Cache flush operations */
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
static inline void
flush_page_to_ram(struct page *page)
{
}
extern void flush_cache_all_local(void);
#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
flush_cache_all_local();
}
#else
#define flush_cache_all flush_cache_all_local
#endif
/* The following value needs to be tuned and probably scaled with the
* cache size.
*/
#define FLUSH_THRESHOLD 0x80000
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
flush_user_dcache_range_asm(start,end);
#else
if ((end - start) < FLUSH_THRESHOLD)
flush_user_dcache_range_asm(start,end);
else
flush_data_cache();
#endif
}
static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
flush_user_icache_range_asm(start,end);
#else
if ((end - start) < FLUSH_THRESHOLD)
flush_user_icache_range_asm(start,end);
else
flush_instruction_cache();
#endif
}
static inline void flush_dcache_page(struct page *page)
{
if (page->mapping && list_empty(&page->mapping->i_mmap) &&
list_empty(&page->mapping->i_mmap_shared)) {
set_bit(PG_dcache_dirty, &page->flags);
} else {
flush_kernel_dcache_page(page_address(page));
}
}
#define flush_icache_page(vma,page) do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)
#define flush_icache_range(s,e) do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
#define flush_icache_user_range(vma, page, addr, len) \
flush_icache_page((vma), (page))
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
int sr3;
if (!vma->vm_mm->context) {
BUG();
return;
}
sr3 = mfsp(3);
if (vma->vm_mm->context == sr3) {
flush_user_dcache_range(start,end);
flush_user_icache_range(start,end);
} else {
flush_cache_all();
}
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
int sr3;
if (!vma->vm_mm->context) {
BUG();
return;
}
sr3 = mfsp(3);
if (vma->vm_mm->context == sr3) {
flush_user_dcache_range(vmaddr,vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range(vmaddr,vmaddr + PAGE_SIZE);
} else {
if (vma->vm_flags & VM_EXEC)
flush_cache_all();
else
flush_data_cache();
}
}
#endif
@@ -16,10 +16,9 @@
extern unsigned int csum_partial(const unsigned char *, int, unsigned int);

/*
- * the same as csum_partial, but copies from src while it
- * checksums
+ * The same as csum_partial, but copies from src while it checksums.
 *
- * here even more important to align src and dst on a 32-bit (or even
+ * Here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
extern unsigned int csum_partial_copy_nocheck(const char *, char *, int, unsigned int);
@@ -28,7 +27,7 @@ extern unsigned int csum_partial_copy_nocheck(const char *, char *, int, unsigne
 * this is a new version of the above that records errors it finds in *errp,
 * but continues and zeros the rest of the buffer.
 */
-unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
+extern unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);

/*
 * Optimized for IP headers, which always checksum on 4 octet boundaries.
@@ -40,32 +39,31 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
	unsigned int sum;

-	__asm__ __volatile__ ("
-	ldws,ma 4(%1), %0
-	addi -4, %2, %2
-	comib,>= 0, %2, 2f
-
-	ldws,ma 4(%1), %%r19
-	add %0, %%r19, %0
-	ldws,ma 4(%1), %%r19
-	addc %0, %%r19, %0
-	ldws,ma 4(%1), %%r19
-	addc %0, %%r19, %0
-1:	ldws,ma 4(%1), %%r19
-	addib,<> -1, %2, 1b
-	addc %0, %%r19, %0
-	addc %0, %%r0, %0
-
-	zdepi -1, 31, 16, %%r19
-	and %0, %%r19, %%r20
-	extru %0, 15, 16, %%r21
-	add %%r20, %%r21, %0
-	and %0, %%r19, %%r20
-	extru %0, 15, 16, %%r21
-	add %%r20, %%r21, %0
-	subi -1, %0, %0
-2:
-	"
+	__asm__ __volatile__ (
+"	ldws,ma 4(%1), %0\n"
+"	addi -4, %2, %2\n"
+"	comib,>= 0, %2, 2f\n"
+"\n"
+"	ldws,ma 4(%1), %%r19\n"
+"	add %0, %%r19, %0\n"
+"	ldws,ma 4(%1), %%r19\n"
+"	addc %0, %%r19, %0\n"
+"	ldws,ma 4(%1), %%r19\n"
+"	addc %0, %%r19, %0\n"
+"1:	ldws,ma 4(%1), %%r19\n"
+"	addib,<> -1, %2, 1b\n"
+"	addc %0, %%r19, %0\n"
+"	addc %0, %%r0, %0\n"
+"\n"
+"	zdepi -1, 31, 16, %%r19\n"
+"	and %0, %%r19, %%r20\n"
+"	extru %0, 15, 16, %%r21\n"
+"	add %%r20, %%r21, %0\n"
+"	and %0, %%r19, %%r20\n"
+"	extru %0, 15, 16, %%r21\n"
+"	add %%r20, %%r21, %0\n"
+"	subi -1, %0, %0\n"
+"2:\n"
	: "=r" (sum), "=r" (iph), "=r" (ihl)
	: "1" (iph), "2" (ihl)
	: "r19", "r20", "r21" );
@@ -78,9 +76,12 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 */
static inline unsigned int csum_fold(unsigned int sum)
{
-	sum = (sum & 0xffff) + (sum >> 16);
-	sum = (sum & 0xffff) + (sum >> 16);
-	return ~sum;
+	/* add the swapped two 16-bit halves of sum,
+	   a possible carry from adding the two 16-bit halves,
+	   will carry from the lower half into the upper half,
+	   giving us the correct sum in the upper half. */
+	sum += (sum << 16) + (sum >> 16);
+	return (~sum) >> 16;
}
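The single add works because both 16-bit halves, plus any carry between them, land in the upper half of sum. A standalone sketch (not kernel code) checking it against the classic two-pass fold:

	#include <assert.h>
	#include <stdint.h>

	static uint16_t fold_classic(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	static uint16_t fold_parisc(uint32_t sum)
	{
		sum += (sum << 16) + (sum >> 16);	/* both halves end up in the top half */
		return (uint16_t)((~sum) >> 16);
	}

	int main(void)
	{
		assert(fold_classic(0x12345678u) == fold_parisc(0x12345678u));
		assert(fold_classic(0xffffffffu) == fold_parisc(0xffffffffu));
		return 0;
	}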
static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
@@ -89,11 +90,11 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
					       unsigned short proto,
					       unsigned int sum)
{
-	__asm__("
-	add %1, %0, %0
-	addc %2, %0, %0
-	addc %3, %0, %0
-	addc %%r0, %0, %0 "
+	__asm__(
+"	add  %1, %0, %0\n"
+"	addc %2, %0, %0\n"
+"	addc %3, %0, %0\n"
+"	addc %%r0, %0, %0\n"
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
	return sum;
@@ -120,6 +121,7 @@ static inline unsigned short ip_compute_csum(unsigned char * buf, int len) {
	return csum_fold (csum_partial(buf, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM

static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
						     struct in6_addr *daddr,
@@ -127,7 +129,62 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
						     unsigned short proto,
						     unsigned int sum)
{
-	BUG();
+	__asm__ __volatile__ (
#if BITS_PER_LONG > 32
/*
** We can execute two loads and two adds per cycle on PA 8000.
** But add insn's get serialized waiting for the carry bit.
** Try to keep 4 registers with "live" values ahead of the ALU.
*/
" ldd,ma 8(%1), %%r19\n" /* get 1st saddr word */
" ldd,ma 8(%2), %%r20\n" /* get 1st daddr word */
" add %8, %3, %3\n"/* add 16-bit proto + len */
" add %%r19, %0, %0\n"
" ldd,ma 8(%1), %%r21\n" /* 2cd saddr */
" ldd,ma 8(%2), %%r22\n" /* 2cd daddr */
" add,dc %%r20, %0, %0\n"
" add,dc %%r21, %0, %0\n"
" add,dc %%r22, %0, %0\n"
" add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
" extrd,u %0, 31, 32, %%r19\n" /* copy upper half down */
" depdi 0, 31, 32, %0\n" /* clear upper half */
" add %%r19, %0, %0\n" /* fold into 32-bits */
" addc 0, %0, %0\n" /* add carry */
#else
/*
** For PA 1.x, the insn order doesn't matter as much:
** the insn stream is serialized on the carry bit here too,
** since each addc consumes the result of the previous
** operation (eg r0 + x).
*/
" ldw,ma 4(%1), %%r19\n" /* get 1st saddr word */
" ldw,ma 4(%2), %%r20\n" /* get 1st daddr word */
" add %8, %3, %3\n" /* add 16-bit proto + len */
" add %%r19, %0, %0\n"
" ldw,ma 4(%1), %%r21\n" /* 2cd saddr */
" addc %%r20, %0, %0\n"
" ldw,ma 4(%2), %%r22\n" /* 2cd daddr */
" addc %%r21, %0, %0\n"
" ldw,ma 4(%1), %%r19\n" /* 3rd saddr */
" addc %%r22, %0, %0\n"
" ldw,ma 4(%2), %%r20\n" /* 3rd daddr */
" addc %%r19, %0, %0\n"
" ldw,ma 4(%1), %%r21\n" /* 4th saddr */
" addc %%r20, %0, %0\n"
" ldw,ma 4(%2), %%r22\n" /* 4th daddr */
" addc %%r21, %0, %0\n"
" addc %%r22, %0, %0\n"
" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
#endif
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
: "r19", "r20", "r21", "r22");
	return csum_fold(sum);
}
...
#ifndef _PARISC_CURRENT_H
#define _PARISC_CURRENT_H

-#include <asm/processor.h>
+#include <asm/thread_info.h>

struct task_struct;

static inline struct task_struct * get_current(void)
{
-	struct task_struct *current;
-
-	asm("copy 30,%0" : "=r" (current));
-	return (struct task_struct *)((long) current & ~(THREAD_SIZE-1));
+	return current_thread_info()->task;
}

#define current get_current()
...
@@ -11,13 +11,11 @@
 * Delay routines
 */

-extern unsigned long loops_per_sec;
-
static __inline__ void __delay(unsigned long loops) {
	asm volatile(
-"	.balignl	64,0x34000034
-	addib,UV -1,%0,.
-	nop"
+"	.balignl	64,0x34000034\n"
+"	addib,UV -1,%0,.\n"
+"	nop\n"
	: "=r" (loops) : "0" (loops));
}
...
/* $Id: dma.h,v 1.1 2002/07/20 15:52:25 rhirst Exp $
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
* (c) Copyright 2000, Grant Grundler
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <linux/config.h>
#include <asm/io.h> /* need byte IO */
#include <asm/system.h>
#define dma_outb outb
#define dma_inb inb
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMA's into manageable chunks.
** On parisc, this is more of a software/tuning constraint
** than a HW one. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
/* The maximum address that we can perform a DMA transfer to on this platform
** New dynamic DMA interfaces should obsolete this....
*/
#define MAX_DMA_ADDRESS (~0UL)
/*
** We don't have DMA channels... well V-class does but the
** Dynamic DMA Mapping interface will support them... right? :^)
** Note: this is not relevant right now for PA-RISC, but we cannot
** leave this as undefined because some things (e.g. sound)
** won't compile :-(
*/
#define MAX_DMA_CHANNELS 8
#define DMA_MODE_READ 1
#define DMA_MODE_WRITE 2
#define DMA_AUTOINIT 0x10
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG)
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG)
extern spinlock_t dma_spin_lock;
static __inline__ unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static __inline__ void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
: ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
/* using short to get 16-bit wrap around */
unsigned short count;
count = 1 + dma_inb(io_port);
count += dma_inb(io_port) << 8;
return (dmanr<=3)? count : (count<<1);
}
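The `1 +` and the deliberate 16-bit wraparound work together: the 8237 holds N-1 while transferring N bytes and reads back 0xffff once it finishes, so adding one in an unsigned short reports the true remainder and rolls the finished case over to zero. Schematically (illustrative values only):

	unsigned short count;

	count = 1 + 0xffff;	/* register after the final transfer: wraps to 0 */
	count = 1 + 0x0009;	/* mid-transfer reading: 10 bytes still to go */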
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
if (dmanr<=3)
dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
#endif
}
static __inline__ void disable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
if (dmanr<=3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
else
dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
#endif
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
}
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
}
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
extern int get_dma_list(char *buf); /* proc/dma support */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */
/*
* eisa_bus.h interface between the eisa BA driver and the bus enumerator
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 2002 Daniel Engstrom <5116@telia.com>
*
*/
#ifndef ASM_EISA_H
#define ASM_EISA_H
extern void eisa_make_irq_level(int num);
extern void eisa_make_irq_edge(int num);
extern int eisa_enumerator(unsigned long eeprom_addr,
struct resource *io_parent,
struct resource *mem_parent);
extern int eisa_eeprom_init(unsigned long addr);
#endif
/*
* eisa_eeprom.h - provide support for EISA adapters in PA-RISC machines
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 2001, 2002 Daniel Engstrom <5116@telia.com>
*
*/
#ifndef ASM_EISA_EEPROM_H
#define ASM_EISA_EEPROM_H
#define HPEE_MAX_LENGTH 0x2000 /* maximum eeprom length */
#define HPEE_SLOT_INFO(slot) (20+(48*slot))
struct eeprom_header
{
u_int32_t num_writes; /* number of writes */
u_int8_t flags; /* flags, usage? */
u_int8_t ver_maj;
u_int8_t ver_min;
u_int8_t num_slots; /* number of EISA slots in system */
	u_int16_t csum;        /* checksum, I don't know how to calculate this */
u_int8_t pad[10];
} __attribute__ ((packed));
struct eeprom_eisa_slot_info
{
u_int32_t eisa_slot_id;
u_int32_t config_data_offset;
u_int32_t num_writes;
u_int16_t csum;
u_int16_t num_functions;
u_int16_t config_data_length;
/* bits 0..3 are the duplicate slot id */
#define HPEE_SLOT_INFO_EMBEDDED 0x10
#define HPEE_SLOT_INFO_VIRTUAL 0x20
#define HPEE_SLOT_INFO_NO_READID 0x40
#define HPEE_SLOT_INFO_DUPLICATE 0x80
u_int8_t slot_info;
#define HPEE_SLOT_FEATURES_ENABLE 0x01
#define HPEE_SLOT_FEATURES_IOCHK 0x02
#define HPEE_SLOT_FEATURES_CFG_INCOMPLETE 0x80
u_int8_t slot_features;
u_int8_t ver_min;
u_int8_t ver_maj;
#define HPEE_FUNCTION_INFO_HAVE_TYPE 0x01
#define HPEE_FUNCTION_INFO_HAVE_MEMORY 0x02
#define HPEE_FUNCTION_INFO_HAVE_IRQ 0x04
#define HPEE_FUNCTION_INFO_HAVE_DMA 0x08
#define HPEE_FUNCTION_INFO_HAVE_PORT 0x10
#define HPEE_FUNCTION_INFO_HAVE_PORT_INIT 0x20
/* I think there are two slightly different
 * versions of the function_info field:
 * one in the fixed header and one optional
 * in the parsed slot data area */
#define HPEE_FUNCTION_INFO_HAVE_FUNCTION 0x01
#define HPEE_FUNCTION_INFO_F_DISABLED 0x80
#define HPEE_FUNCTION_INFO_CFG_FREE_FORM 0x40
u_int8_t function_info;
#define HPEE_FLAG_BOARD_IS_ISA 0x01 /* flag and minor version for isa board */
u_int8_t flags;
u_int8_t pad[24];
} __attribute__ ((packed));
#define HPEE_MEMORY_MAX_ENT 9
/* memory descriptor: byte 0 */
#define HPEE_MEMORY_WRITABLE 0x01
#define HPEE_MEMORY_CACHABLE 0x02
#define HPEE_MEMORY_TYPE_MASK 0x18
#define HPEE_MEMORY_TYPE_SYS 0x00
#define HPEE_MEMORY_TYPE_EXP 0x08
#define HPEE_MEMORY_TYPE_VIR 0x10
#define HPEE_MEMORY_TYPE_OTH 0x18
#define HPEE_MEMORY_SHARED 0x20
#define HPEE_MEMORY_MORE 0x80
/* memory descriptor: byte 1 */
#define HPEE_MEMORY_WIDTH_MASK 0x03
#define HPEE_MEMORY_WIDTH_BYTE 0x00
#define HPEE_MEMORY_WIDTH_WORD 0x01
#define HPEE_MEMORY_WIDTH_DWORD 0x02
#define HPEE_MEMORY_DECODE_MASK 0x0c
#define HPEE_MEMORY_DECODE_20BITS 0x00
#define HPEE_MEMORY_DECODE_24BITS 0x04
#define HPEE_MEMORY_DECODE_32BITS 0x08
/* bytes 2 and 3 are a 16bit LE value
 * containing the memory size in kilobytes */
/* byte 4,5,6 are a 24bit LE value
* containing the memory base address */
#define HPEE_IRQ_MAX_ENT 7
/* Interrupt entry: byte 0 */
#define HPEE_IRQ_CHANNEL_MASK 0xf
#define HPEE_IRQ_TRIG_LEVEL 0x20
#define HPEE_IRQ_MORE 0x80
/* byte 1 seems to be unused */
#define HPEE_DMA_MAX_ENT 4
/* dma entry: byte 0 */
#define HPEE_DMA_CHANNEL_MASK 7
#define HPEE_DMA_SIZE_MASK 0xc
#define HPEE_DMA_SIZE_BYTE 0x0
#define HPEE_DMA_SIZE_WORD 0x4
#define HPEE_DMA_SIZE_DWORD 0x8
#define HPEE_DMA_SHARED 0x40
#define HPEE_DMA_MORE 0x80
/* dma entry: byte 1 */
#define HPEE_DMA_TIMING_MASK 0x30
#define HPEE_DMA_TIMING_ISA 0x0
#define HPEE_DMA_TIMING_TYPEA 0x10
#define HPEE_DMA_TIMING_TYPEB 0x20
#define HPEE_DMA_TIMING_TYPEC 0x30
#define HPEE_PORT_MAX_ENT 20
/* port entry byte 0 */
#define HPEE_PORT_SIZE_MASK 0x1f
#define HPEE_PORT_SHARED 0x40
#define HPEE_PORT_MORE 0x80
/* bytes 1 and 2 are a 16bit LE value
 * containing the start port number */
#define HPEE_PORT_INIT_MAX_LEN 60 /* in bytes here */
/* port init entry byte 0 */
#define HPEE_PORT_INIT_WIDTH_MASK 0x3
#define HPEE_PORT_INIT_WIDTH_BYTE 0x0
#define HPEE_PORT_INIT_WIDTH_WORD 0x1
#define HPEE_PORT_INIT_WIDTH_DWORD 0x2
#define HPEE_PORT_INIT_MASK 0x4
#define HPEE_PORT_INIT_MORE 0x80
#define HPEE_SELECTION_MAX_ENT 26
#define HPEE_TYPE_MAX_LEN 80
#endif
@@ -9,19 +9,13 @@
#define EM_PARISC	15

-#define ELF_NGREG 32
-#define ELF_NFPREG 32
-
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-#define ELF_CORE_COPY_REGS(gregs, regs) \
-	memcpy(gregs, regs, \
-	       sizeof(struct pt_regs) < sizeof(elf_gregset_t) ? \
-	       sizeof(struct pt_regs) : sizeof(elf_gregset_t));
+/*
+ * The following definitions are those for 32-bit ELF binaries on a 32-bit kernel
+ * and for 64-bit binaries on a 64-bit kernel. To run 32-bit binaries on a 64-bit
+ * kernel, arch/parisc64/kernel/binfmt_elf32.c defines these macros appropriately
+ * and then #includes binfmt_elf.c, which then includes this file.
+ */
+#ifndef ELF_CLASS
/*
 * This is used to ensure we don't load something for the wrong architecture.
@@ -30,16 +24,84 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 * the following macros are for the default case. However, for the 64
 * bit kernel we also support 32 bit parisc binaries. To do that
 * arch/parisc64/kernel/binfmt_elf32.c defines its own set of these
- * macros, and then if includes fs/binfmt_elf.c to provide an alternate
+ * macros, and then it includes fs/binfmt_elf.c to provide an alternate
 * elf binary handler for 32 bit binaries (on the 64 bit kernel).
 */
#ifdef __LP64__
#define ELF_CLASS	ELFCLASS64
#else
#define ELF_CLASS	ELFCLASS32
#endif
typedef unsigned long elf_greg_t;
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM ("PARISC\0" /*+((boot_cpu_data.x86-3)*5) */)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) \
current->personality = PER_LINUX
#endif
/*
* Fill in general registers in a core dump. This saves pretty
* much the same registers as hp-ux, although in a different order.
* Registers marked # below are not currently saved in pt_regs, so
* we use their current values here.
*
* gr0..gr31
* sr0..sr7
* iaoq0..iaoq1
* iasq0..iasq1
* cr11 (sar)
* cr19 (iir)
* cr20 (isr)
* cr21 (ior)
* # cr22 (ipsw)
* # cr0 (recovery counter)
* # cr24..cr31 (temporary registers)
* # cr8,9,12,13 (protection IDs)
* # cr10 (scr/ccr)
* # cr15 (ext int enable mask)
*
*/
#define ELF_CORE_COPY_REGS(dst, pt) \
memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
memcpy(dst + 0, pt->gr, 32 * sizeof(elf_greg_t)); \
memcpy(dst + 32, pt->sr, 8 * sizeof(elf_greg_t)); \
memcpy(dst + 40, pt->iaoq, 2 * sizeof(elf_greg_t)); \
memcpy(dst + 42, pt->iasq, 2 * sizeof(elf_greg_t)); \
dst[44] = pt->sar; dst[45] = pt->iir; \
dst[46] = pt->isr; dst[47] = pt->ior; \
dst[48] = mfctl(22); dst[49] = mfctl(0); \
dst[50] = mfctl(24); dst[51] = mfctl(25); \
dst[52] = mfctl(26); dst[53] = mfctl(27); \
dst[54] = mfctl(28); dst[55] = mfctl(29); \
dst[56] = mfctl(30); dst[57] = mfctl(31); \
dst[58] = mfctl( 8); dst[59] = mfctl( 9); \
dst[60] = mfctl(12); dst[61] = mfctl(13); \
dst[62] = mfctl(10); dst[63] = mfctl(15);
#endif /* ! ELF_CLASS */
#define ELF_NGREG 80 /* We only need 64 at present, but leave space
for expansion. */
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#define ELF_NFPREG 32
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
struct pt_regs; /* forward declaration... */
#define elf_check_arch(x) ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS)

/*
@@ -80,18 +142,4 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_HWCAP	0
/* (boot_cpu_data.x86_capability) */

-/* This yields a string that ld.so will use to load implementation
-   specific libraries for optimization. This is more specific in
-   intent than poking at uname or /proc/cpuinfo.
-
-   For the moment, we have only optimizations for the Intel generations,
-   but that could change... */
-
-#define ELF_PLATFORM  ("PARISC\0" /*+((boot_cpu_data.x86-3)*5) */)
-
-#ifdef __KERNEL__
-#define SET_PERSONALITY(ex, ibcs2) \
-	current->personality = PER_LINUX
-#endif
-
#endif
@@ -24,6 +24,7 @@
#define O_DIRECT	00040000 /* direct disk access hint - currently ignored */
#define O_DIRECTORY	00010000 /* must be a directory */
#define O_NOFOLLOW	00000200 /* don't follow links */
+#define O_INVISIBLE	04000000 /* invisible I/O, for DMAPI/XDSM */

#define F_DUPFD		0	/* dup */
#define F_GETFD		1	/* get f_flags */
...
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H

-#define FIXADDR_TOP	(0xffffe000UL)
-#define FIXADDR_SIZE	(0 << PAGE_SHIFT)
-#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
+/*
+ * Allocate an 8 Mb temporary mapping area for copy_user_page/clear_user_page.
+ * This area needs to be aligned on an 8 Mb boundary.
+ */
+#define TMPALIAS_MAP_START	(__PAGE_OFFSET - 0x01000000)
+#define FIXADDR_START		((unsigned long)TMPALIAS_MAP_START)

#endif
/*
* Architecture specific parts of the Floppy driver
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995
*/
#ifndef __ASM_PARISC_FLOPPY_H
#define __ASM_PARISC_FLOPPY_H
#include <linux/vmalloc.h>
/*
* The DMA channel used by the floppy controller cannot access data at
* addresses >= 16MB
*
* Went back to the 1MB limit, as some people had problems with the floppy
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a,s,vdma) \
(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
#define SW fd_routine[use_virtual_dma&1]
#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(port) readb(port)
#define fd_outb(value, port) writeb(value, port)
#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
#define FLOPPY_CAN_FALLBACK_ON_NODMA
static int virtual_dma_count=0;
static int virtual_dma_residue=0;
static char *virtual_dma_addr=0;
static int virtual_dma_mode=0;
static int virtual_dma_port=0; /* referenced by floppy_hardint() and vdma_dma_setup() below */
static int doing_pdma=0;
static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
{
register unsigned char st;
#undef TRACE_FLPY_INT
#ifdef TRACE_FLPY_INT
static int calls=0;
static int bytes=0;
static int dma_wait=0;
#endif
if (!doing_pdma) {
floppy_interrupt(irq, dev_id, regs);
return;
}
#ifdef TRACE_FLPY_INT
if(!calls)
bytes = virtual_dma_count;
#endif
{
register int lcount;
register char *lptr = virtual_dma_addr;
for (lcount = virtual_dma_count; lcount; lcount--) {
st = fd_inb(virtual_dma_port+4) & 0xa0 ;
if (st != 0xa0)
break;
if (virtual_dma_mode) {
fd_outb(*lptr, virtual_dma_port+5);
} else {
*lptr = fd_inb(virtual_dma_port+5);
}
lptr++;
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
st = fd_inb(virtual_dma_port+4);
}
#ifdef TRACE_FLPY_INT
calls++;
#endif
if (st == 0x20)
return;
if (!(st & 0x20)) {
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
dma_wait=0;
#endif
doing_pdma = 0;
floppy_interrupt(irq, dev_id, regs);
return;
}
#ifdef TRACE_FLPY_INT
if (!virtual_dma_count)
dma_wait++;
#endif
}
static void fd_disable_dma(void)
{
if(! (can_use_virtual_dma & 1))
disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
virtual_dma_count=0;
}
static int vdma_request_dma(unsigned int dmanr, const char * device_id)
{
return 0;
}
static void vdma_nop(unsigned int dummy)
{
}
static int vdma_get_dma_residue(unsigned int dummy)
{
return virtual_dma_count + virtual_dma_residue;
}
static int fd_request_irq(void)
{
if(can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
"floppy", NULL);
else
return request_irq(FLOPPY_IRQ, floppy_interrupt,
SA_INTERRUPT|SA_SAMPLE_RANDOM,
"floppy", NULL);
}
static unsigned long dma_mem_alloc(unsigned long size)
{
return __get_dma_pages(GFP_KERNEL, get_order(size));
}
static unsigned long vdma_mem_alloc(unsigned long size)
{
return (unsigned long) vmalloc(size);
}
#define nodma_mem_alloc(size) vdma_mem_alloc(size)
static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
{
if((unsigned int) addr >= (unsigned int) high_memory)
return vfree((void *)addr);
else
free_pages(addr, get_order(size));
}
#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
if(can_use_virtual_dma == 2) {
if((unsigned int) addr >= (unsigned int) high_memory ||
virt_to_bus(addr) >= 0x1000000 ||
_CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
use_virtual_dma = 0;
} else {
use_virtual_dma = can_use_virtual_dma & 1;
}
}
#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
doing_pdma = 1;
virtual_dma_port = io;
virtual_dma_mode = (mode == DMA_MODE_WRITE);
virtual_dma_addr = addr;
virtual_dma_count = size;
virtual_dma_residue = 0;
return 0;
}
static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
{
#ifdef FLOPPY_SANITY_CHECK
if (CROSS_64KB(addr, size)) {
printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
return -1;
}
#endif
/* actual, physical DMA */
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
set_dma_mode(FLOPPY_DMA,mode);
set_dma_addr(FLOPPY_DMA,virt_to_bus(addr));
set_dma_count(FLOPPY_DMA,size);
enable_dma(FLOPPY_DMA);
return 0;
}
struct fd_routine_l {
int (*_request_dma)(unsigned int dmanr, const char * device_id);
void (*_free_dma)(unsigned int dmanr);
int (*_get_dma_residue)(unsigned int dummy);
unsigned long (*_dma_mem_alloc) (unsigned long size);
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
request_dma,
free_dma,
get_dma_residue,
dma_mem_alloc,
hard_dma_setup
},
{
vdma_request_dma,
vdma_nop,
vdma_get_dma_residue,
vdma_mem_alloc,
vdma_dma_setup
}
};
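[Editor's note: fd_routine[] is an ops table indexed by use_virtual_dma, so the SW/CSW macros above dispatch every DMA helper to either the hardware-DMA or the pseudo-DMA implementation. A minimal standalone sketch of that dispatch pattern, with hypothetical names:]
#include <stdio.h>

/* hypothetical stand-ins for the two fd_routine[] entries above:
 * index 0 = hardware DMA, index 1 = virtual (pseudo) DMA */
struct ops {
	const char *(*setup)(void);
};

static const char *hard_setup(void) { return "hard DMA setup"; }
static const char *vdma_setup(void) { return "virtual DMA setup"; }

static const struct ops routine[] = {
	{ hard_setup },
	{ vdma_setup },
};

int main(void)
{
	int use_virtual_dma = 1; /* as toggled by fd_chose_dma_mode() */

	/* SW expands to fd_routine[use_virtual_dma & 1] in the header above */
	puts(routine[use_virtual_dma & 1].setup());
	return 0;
}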
static int FDC1 = 0x3f0; /* Lies. Floppy controller is memory mapped, not io mapped */
static int FDC2 = -1;
#define FLOPPY0_TYPE 0
#define FLOPPY1_TYPE 0
#define N_FDC 1
#define N_DRIVE 8
#define FLOPPY_MOTOR_MASK 0xf0
#define AUTO_DMA
#define EXTRA_FLOPPY_PARAMS
#endif /* __ASM_PARISC_FLOPPY_H */
/*
* Architecture specific parts of HP's STI (framebuffer) driver
* structures are HP-UX compatible for XFree86 usage
*/
#ifndef __ASM_PARISC_GRFIOCTL_H
#define __ASM_PARISC_GRFIOCTL_H
/* upper 32 bits of graphics id (HP/UX identifier) */
#define GRFGATOR 8
#define S9000_ID_S300 9
#define GRFBOBCAT 9
#define GRFCATSEYE 9
#define S9000_ID_98720 10
#define GRFRBOX 10
#define S9000_ID_98550 11
#define GRFFIREEYE 11
#define S9000_ID_A1096A 12
#define GRFHYPERION 12
#define S9000_ID_FRI 13
#define S9000_ID_98730 14
#define GRFDAVINCI 14
#define S9000_ID_98705 0x26C08070 /* Tigershark */
#define S9000_ID_98736 0x26D148AB
#define S9000_ID_A1659A 0x26D1482A /* CRX 8 plane color (=ELK) */
#define S9000_ID_ELK S9000_ID_A1659A
#define S9000_ID_A1439A 0x26D148EE /* CRX24 = CRX+ (24-plane color) */
#define S9000_ID_A1924A 0x26D1488C /* GRX gray-scale */
#define S9000_ID_ELM S9000_ID_A1924A
#define S9000_ID_98765 0x27480DEF
#define S9000_ID_ELK_768 0x27482101
#define S9000_ID_STINGER 0x27A4A402
#define S9000_ID_TIMBER 0x27F12392 /* Bushmaster (710) Graphics */
#define S9000_ID_TOMCAT 0x27FCCB6D /* dual-headed ELK (Dual CRX) */
#define S9000_ID_ARTIST 0x2B4DED6D /* Artist (Gecko/712 & 715) onboard Graphics */
#define S9000_ID_HCRX 0x2BCB015A /* Hyperdrive/Hyperbowl (A4071A) Graphics */
#define CRX24_OVERLAY_PLANES 0x920825AA /* Overlay planes on CRX24 */
#define CRT_ID_ELK_1024 S9000_ID_ELK_768 /* Elk 1024x768 CRX */
#define CRT_ID_ELK_1280 S9000_ID_A1659A /* Elk 1280x1024 CRX */
#define CRT_ID_ELK_1024DB 0x27849CA5 /* Elk 1024x768 double buffer */
#define CRT_ID_ELK_GS S9000_ID_A1924A /* Elk 1280x1024 GreyScale */
#define CRT_ID_CRX24 S9000_ID_A1439A /* Piranha */
#define CRT_ID_VISUALIZE_EG 0x2D08C0A7 /* Graffiti (built-in B132+/B160L) */
#define CRT_ID_THUNDER 0x2F23E5FC /* Thunder 1 VISUALIZE 48*/
#define CRT_ID_THUNDER2 0x2F8D570E /* Thunder 2 VISUALIZE 48 XP*/
#define CRT_ID_HCRX S9000_ID_HCRX /* Hyperdrive HCRX */
#define CRT_ID_CRX48Z S9000_ID_STINGER /* Stinger */
#define CRT_ID_DUAL_CRX S9000_ID_TOMCAT /* Tomcat */
#define CRT_ID_PVRX S9000_ID_98705 /* Tigershark */
#define CRT_ID_TIMBER S9000_ID_TIMBER /* Timber (710 builtin) */
#define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */
#define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */
#define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */
/* structure for ioctl(GCDESCRIBE) */
#define gaddr_t unsigned long /* FIXME: PA2.0 (64bit) portable ? */
struct grf_fbinfo {
unsigned int id; /* upper 32 bits of graphics id */
unsigned int mapsize; /* mapped size of framebuffer */
unsigned int dwidth, dlength;/* x and y sizes */
unsigned int width, length; /* total x and total y size */
unsigned int xlen; /* x pitch size */
unsigned int bpp, bppu; /* bits per pixel and used bpp */
unsigned int npl, nplbytes; /* # of planes and bytes per plane */
char name[32]; /* name of the device (from ROM) */
unsigned int attr; /* attributes */
gaddr_t fbbase, regbase;/* framebuffer and register base addr */
gaddr_t regions[6]; /* region bases */
};
#define GCID _IOR('G', 0, int)
#define GCON _IO('G', 1)
#define GCOFF _IO('G', 2)
#define GCAON _IO('G', 3)
#define GCAOFF _IO('G', 4)
#define GCMAP _IOWR('G', 5, int)
#define GCUNMAP _IOWR('G', 6, int)
#define GCMAP_HPUX _IO('G', 5)
#define GCUNMAP_HPUX _IO('G', 6)
#define GCLOCK _IO('G', 7)
#define GCUNLOCK _IO('G', 8)
#define GCLOCK_MINIMUM _IO('G', 9)
#define GCUNLOCK_MINIMUM _IO('G', 10)
#define GCSTATIC_CMAP _IO('G', 11)
#define GCVARIABLE_CMAP _IO('G', 12)
#define GCTERM _IOWR('G',20,int) /* multi-headed Tomcat */
#define GCDESCRIBE _IOR('G', 21, struct grf_fbinfo)
#define GCFASTLOCK _IO('G', 26)
#endif /* __ASM_PARISC_GRFIOCTL_H */
...@@ -3,49 +3,17 @@
#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/hardware.h> /* for struct hp_device */ #include <asm/io.h> /* temporary for __raw_{read,write} */
/*
* The convention used for inb/outb etc. is that names starting with
* two underscores are the inline versions, names starting with a
* single underscore are proper functions, and names starting with a
* letter are macros that map in some way to inline or proper function
* versions. Not all that pretty, but before you change it, be sure
* to convince yourself that it won't break anything (in particular
* module support).
*/
/* Please, call ioremap and use {read,write}[bwl] instead. These functions
* are not very fast.
*/
extern u8 _gsc_readb(void *);
extern u16 _gsc_readw(void *);
extern u32 _gsc_readl(void *);
extern u64 _gsc_readq(void *);
extern void _gsc_writeb(u8, void *);
extern void _gsc_writew(u16,void *);
extern void _gsc_writel(u32,void *);
extern void _gsc_writeq(u64,void *);
#define gsc_readb(a) _gsc_readb((void *)(a))
#define gsc_readw(a) _gsc_readw((void *)(a))
#define gsc_readl(a) _gsc_readl((void *)(a))
#define gsc_readq(a) _gsc_readq((void *)(a))
#define gsc_writeb(v,a) _gsc_writeb((v),(void *)(a))
#define gsc_writew(v,a) _gsc_writew((v),(void *)(a))
#define gsc_writel(v,a) _gsc_writel((v),(void *)(a))
#define gsc_writeq(v,a) _gsc_writeq((v),(void *)(a))
#define gsc_readb(x) __raw_readb((unsigned long)x)
#define gsc_readw(x) __raw_readw((unsigned long)x)
#define gsc_readl(x) __raw_readl((unsigned long)x)
#define gsc_writeb(x, y) __raw_writeb(x, (unsigned long)y)
#define gsc_writew(x, y) __raw_writew(x, (unsigned long)y)
#define gsc_writel(x, y) __raw_writel(x, (unsigned long)y)
struct gsc_dev {
struct gsc_bus *bus; /* bus this device is on */
struct gsc_dev *next; /* chain of all devices */
struct gsc_dev *next_bus; /* chain of all devices on a bus */
struct gsc_dev *next_submod; /* chain of all devices on a module */
unsigned irq; /* irq generated by this device */
void *hpa; /* hard physical address */
u16 hversion;
u8 spa; /* SPA requirements */
u8 type;
u32 sversion;
};
struct gsc_irq {
unsigned long txn_addr; /* IRQ "target" */
...@@ -59,21 +27,5 @@ struct gsc_irq {
extern int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */
extern int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */
struct gsc_bus {
void *hpa; /* HPA of device 0, function 0 of this bus */
};
/*
* There is one gsc_dev structure for each slot-number/function-number
* combination:
*/
struct gsc_dev *gsc_find_device(u16 hversion, struct gsc_dev *from);
extern void probe_serial_gsc(void);
/* returns a virtual irq for device at dev->hpa (works for all LASI/ASP/WAX) */
extern int busdevice_alloc_irq( struct hp_device *dev );
#endif /* __KERNEL__ */
#endif /* LINUX_GSC_H */
/* hardirq.h: 32-bit Sparc hard IRQ support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
*/
/* hardirq.h: PA-RISC hard IRQ support.
*
* Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
*
* The locking is really quite interesting. There's a cpu-local
* count of how many interrupts are being handled, and a global
* lock. An interrupt can only be serviced if the global lock
* is free. You can't be sure no more interrupts are being
* serviced until you've acquired the lock and then checked
* all the per-cpu interrupt counts are all zero. It's a specialised
* br_lock, and that's exactly how Sparc does it. We don't because
* it's more locking for us. This way is lock-free in the interrupt path.
*/
#ifndef __PARISC_HARDIRQ_H #ifndef _PARISC_HARDIRQ_H
#define __PARISC_HARDIRQ_H #define _PARISC_HARDIRQ_H
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cache.h>
typedef struct {
unsigned int __softirq_active; unsigned long __softirq_pending; /* set_bit is used on this */
unsigned int __softirq_mask;
unsigned int __local_irq_count;
unsigned int __local_bh_count;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task;
unsigned long idle_timestamp;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
*/
/*
* We put the hardirq and softirq counter into the preemption counter. The bitmask has the
* following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-31 are the hardirq count (max # of hardirqs: 65536)
*
* - (bit 63 is the PREEMPT_ACTIVE flag---not currently implemented.)
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0xffff0000
*/
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
#define in_irq() ({ int __cpu = smp_processor_id(); \
(local_irq_count(__cpu) != 0); })
#ifndef CONFIG_SMP
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
#define irq_enter(cpu, irq) (local_irq_count(cpu)++)
#define irq_exit(cpu, irq) (local_irq_count(cpu)--)
#define synchronize_irq() barrier()
#else
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 16
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define __MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
/*
* The hardirq mask has to be large enough to have space for potentially all IRQ sources
* in the system nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
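[Editor's note: a standalone C sketch (not part of this commit) that reproduces the bit layout documented above and extracts each field from a sample preempt_count value:]
#include <stdio.h>

#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 16
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define __MASK(x) ((1UL << (x)) - 1)
#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

int main(void)
{
	/* hypothetical sample: two nested hardirqs, one softirq, preempt depth 3 */
	unsigned long preempt_count = (2UL << HARDIRQ_SHIFT)
	                            | (1UL << SOFTIRQ_SHIFT)
	                            | 3;

	printf("hardirq=%lu softirq=%lu preempt=%lu\n",
	       (preempt_count & HARDIRQ_MASK) >> HARDIRQ_SHIFT,
	       (preempt_count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT,
	       preempt_count & PREEMPT_MASK);
	return 0;
}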
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context?
* Interrupt context?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#include <asm/atomic.h> #define hardirq_trylock() (!in_interrupt())
#include <linux/spinlock.h> #define hardirq_endlock() do { } while (0)
#include <asm/system.h>
#include <asm/smp.h>
extern unsigned char global_irq_holder;
extern spinlock_t global_irq_lock;
extern atomic_t global_irq_count;
static inline void release_irqlock(int cpu)
{
/* if we didn't own the irq lock, just ignore.. */
if (global_irq_holder == (unsigned char) cpu) {
global_irq_holder = NO_PROC_ID;
spin_unlock(&global_irq_lock);
}
}
static inline void irq_enter(int cpu)
{
++local_irq_count(cpu);
atomic_inc(&global_irq_count);
}
static inline void irq_exit(int cpu)
{
atomic_dec(&global_irq_count);
--local_irq_count(cpu);
}
static inline int hardirq_trylock(int cpu)
{
return (! atomic_read(&global_irq_count) &&
! spin_is_locked (&global_irq_lock));
}
#define hardirq_endlock(cpu) do { } while (0)
extern void synchronize_irq(void);
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#if CONFIG_PREEMPT
# error CONFIG_PREEMPT currently not supported.
# define in_atomic() BUG()
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic() (preempt_count() != 0)
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
#ifdef CONFIG_SMP
extern void synchronize_irq (unsigned int irq);
#else
# define synchronize_irq(irq) barrier()
#endif /* CONFIG_SMP */
#endif /* __PARISC_HARDIRQ_H */ #endif /* _PARISC_HARDIRQ_H */
#ifndef _PARISC_HP_MACHINES_H_ #ifndef _PARISC_HARDWARE_H
#define _PARISC_HP_MACHINES_H_ #define _PARISC_HARDWARE_H
#include <asm/pdc.h>
struct parisc_device_id {
unsigned char hw_type; /* 5 bits used */
unsigned char hversion_rev; /* 4 bits */
unsigned short hversion; /* 12 bits */
unsigned int sversion; /* 20 bits */
};
#define HWTYPE_ANY_ID 0xff
#define HVERSION_REV_ANY_ID 0xff
#define HVERSION_ANY_ID 0xffff
#define SVERSION_ANY_ID 0xffffffffU
struct hp_hardware {
unsigned short hw_type:5; /* HPHW_xxx */
unsigned short hversion;
unsigned long sversion:28;
unsigned short opt;
char *name; const char name[80]; /* The hardware description */
};
struct hp_device { struct parisc_device {
unsigned short hw_type:5; /* HPHW_xxx */ unsigned long hpa; /* Hard Physical Address */
unsigned short hversion; /* HP-UX uses hv_model:12 */ struct parisc_device_id id;
unsigned int sversion; /* HP-UX uses sv_model:20 sv_opt:8 */ struct parisc_device *parent;
unsigned short opt; struct parisc_device *sibling;
unsigned int hversion_rev; struct parisc_device *child;
unsigned int sversion_rev; struct parisc_driver *driver; /* Driver for this device */
struct hp_hardware * reference; /* This is a pointer to the void *sysdata; /* Driver instance private data */
reference */ char name[80]; /* The hardware description */
unsigned int managed; /* this is if the device has a driver for it */ int irq;
void * hpa;
char hw_path; /* The module number on this bus */
unsigned int num_addrs; /* some devices have additional address ranges. */
unsigned long *addr; /* which will be stored here */
#ifdef __LP64__
/* parms for pdc_pat_cell_module() call */
...@@ -29,7 +46,6 @@ struct hp_device {
/* generic info returned from pdc_pat_cell_module() */
unsigned long mod_info; /* PAT specific - Misc Module info */
unsigned long pmod_loc; /* physical Module location */
unsigned long mod_path; /* Module HW path */
#endif
};
...@@ -43,33 +59,56 @@ enum cpu_type {
pcxu = 6, /* pa8000 pa 2.0 */
pcxu_ = 7, /* pa8200 (u+) pa 2.0 */
pcxw = 8, /* pa8500 pa 2.0 */
pcxw_ = 9 /* pa8600 (w+) pa 2.0 */ pcxw_ = 9, /* pa8600 (w+) pa 2.0 */
pcxw2 = 10 /* pa8700 pa 2.0 */
};
extern char *cpu_name_version[][2]; /* mapping from enum cpu_type to strings */
struct pa_iodc_driver { struct parisc_driver {
unsigned short hw_type:5; /* HPHW_xxx */ struct parisc_driver *next;
unsigned short hversion;
unsigned short hversion_rev;
unsigned long sversion:28;
unsigned short sversion_rev;
unsigned short opt;
unsigned int check; /* Components that are significant */
char *name;
char *version; const struct parisc_device_id *id_table;
int (* callback)(struct hp_device *d, struct pa_iodc_driver *dri); int (*probe) (struct parisc_device *dev); /* New device discovered */
};
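[Editor's note: struct parisc_driver pairs an id_table with a probe() callback; the bus walk matches discovered devices against each registered table. A simplified standalone sketch of that matching pattern — all types and values here are made up:]
#include <stdio.h>

struct dev_id { unsigned short hversion; unsigned int sversion; };
struct device { struct dev_id id; const char *name; };
struct driver {
	const char *name;
	const struct dev_id *id_table; /* terminated by a zeroed entry */
	int (*probe)(struct device *dev);
};

static int lasi_probe(struct device *dev)
{
	printf("probe: %s\n", dev->name);
	return 0;
}

static const struct dev_id lasi_ids[] = { { 0x00a, 0x81000 }, { 0, 0 } };
static struct driver lasi_driver = { "lasi", lasi_ids, lasi_probe };

int main(void)
{
	struct device dev = { { 0x00a, 0x81000 }, "lasi@hpa" };
	const struct dev_id *id;

	/* walk the id table the way a bus core would */
	for (id = lasi_driver.id_table; id->hversion; id++)
		if (id->hversion == dev.id.hversion && id->sversion == dev.id.sversion)
			lasi_driver.probe(&dev);
	return 0;
}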
#define DRIVER_CHECK_HWTYPE 1 struct io_module {
#define DRIVER_CHECK_HVERSION 2 volatile uint32_t nothing; /* reg 0 */
#define DRIVER_CHECK_SVERSION 4 volatile uint32_t io_eim;
#define DRIVER_CHECK_OPT 8 volatile uint32_t io_dc_adata;
/* The following two are useless right now */ volatile uint32_t io_ii_cdata;
#define DRIVER_CHECK_HVERSION_REV 16 volatile uint32_t io_dma_link; /* reg 4 */
#define DRIVER_CHECK_SVERSION_REV 32 volatile uint32_t io_dma_command;
#define DRIVER_CHECK_EVERYTHING 63 volatile uint32_t io_dma_address;
volatile uint32_t io_dma_count;
volatile uint32_t io_flex; /* reg 8 */
volatile uint32_t io_spa_address;
volatile uint32_t reserved1[2];
volatile uint32_t io_command; /* reg 12 */
volatile uint32_t io_status;
volatile uint32_t io_control;
volatile uint32_t io_data;
volatile uint32_t reserved2; /* reg 16 */
volatile uint32_t chain_addr;
volatile uint32_t sub_mask_clr;
volatile uint32_t reserved3[13];
volatile uint32_t undefined[480];
volatile uint32_t unpriv[512];
};
struct bc_module {
volatile uint32_t unused1[12];
volatile uint32_t io_command;
volatile uint32_t io_status;
volatile uint32_t io_control;
volatile uint32_t unused2[1];
volatile uint32_t io_err_resp;
volatile uint32_t io_err_info;
volatile uint32_t io_err_req;
volatile uint32_t unused3[11];
volatile uint32_t io_io_low;
volatile uint32_t io_io_high;
};
#define HPHW_NPROC 0
#define HPHW_MEMORY 1
...@@ -88,16 +127,33 @@ struct pa_iodc_driver {
#define HPHW_FABRIC 14
#define HPHW_FAULTY 31
extern struct hp_hardware hp_hardware_list[];
char *parisc_getHWtype( unsigned short hw_type ); /* hardware.c: */
extern const char *parisc_hardware_description(struct parisc_device_id *id);
extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
/* Attention: first hversion, then sversion...! */ struct pci_dev;
char *parisc_getHWdescription( unsigned short hw_type,
unsigned long hversion, /* have to be long ! */
unsigned long sversion );
enum cpu_type parisc_get_cpu_type( unsigned long hversion ); /* drivers.c: */
extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
struct hardware_path *path);
extern int register_parisc_device(struct parisc_device *dev);
extern int register_parisc_driver(struct parisc_driver *driver);
extern int count_parisc_driver(struct parisc_driver *driver);
extern int unregister_parisc_driver(struct parisc_driver *driver);
extern void walk_central_bus(void);
extern void fixup_child_irqs(struct parisc_device *parent, int irqbase,
int (*choose)(struct parisc_device *parent));
extern void print_subdevices(struct parisc_device *dev);
extern const struct parisc_device *find_pa_parent_type(const struct parisc_device *, int);
extern void print_parisc_devices(void);
extern char *print_pa_hwpath(struct parisc_device *dev, char *path);
extern char *print_pci_hwpath(struct pci_dev *dev, char *path);
extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
extern int register_driver(struct pa_iodc_driver *driver);
#endif /* inventory.c: */
extern void do_memory_inventory(void);
extern void do_device_inventory(void);
#endif /* _PARISC_HARDWARE_H */
...@@ -7,7 +7,7 @@
* (c) 1999 Matthew Wilcox
*/
extern unsigned long hil_base; /* declared in drivers/gsc/hil.c */ extern unsigned long hil_base; /* declared in drivers/parisc/hil.c */
extern unsigned int hil_irq;
#define HILBASE hil_base /* 0xf0821000 (old) or 0xf0201000 (new) */
...
...@@ -5,29 +5,29 @@
*/
/*
* This file contains the i386 architecture specific IDE code. * This file contains the PARISC architecture specific IDE code.
*/
#ifndef __ASMi386_IDE_H #ifndef __ASM_PARISC_IDE_H
#define __ASMi386_IDE_H #define __ASM_PARISC_IDE_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/superio.h>
#ifndef MAX_HWIFS
#define MAX_HWIFS 10 #define MAX_HWIFS 2
#endif
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
switch (base) {
case 0x1f0: return 14; #ifdef CONFIG_SUPERIO
case 0x170: return 15; case 0x1f0:
case 0x1e8: return 11; case 0x170:
case 0x168: return 10; return superio_get_ide_irq();
case 0x1e0: return 8; #endif /* CONFIG_SUPERIO */
case 0x160: return 12;
default:
return 0;
}
...@@ -36,12 +36,10 @@ static __inline__ int ide_default_irq(ide_ioreg_t base)
static __inline__ ide_ioreg_t ide_default_io_base(int index)
{
switch (index) {
case 0: return 0x1f0; #ifdef CONFIG_SUPERIO
case 1: return 0x170; case 0: return (superio_get_ide_irq() ? 0x1f0 : 0);
case 2: return 0x1e8; case 1: return (superio_get_ide_irq() ? 0x170 : 0);
case 3: return 0x168; #endif /* CONFIG_SUPERIO */
case 4: return 0x1e0;
case 5: return 0x160;
default:
return 0;
}
...@@ -63,6 +61,7 @@ static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port,
}
if (irq != NULL)
*irq = 0;
hw->io_ports[IDE_IRQ_OFFSET] = 0;
}
static __inline__ void ide_init_default_hwifs(void)
...@@ -79,6 +78,19 @@ static __inline__ void ide_init_default_hwifs(void)
#endif
}
#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
#define ide_check_region(from,extent) check_region((from), (extent))
#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
#define ide_release_region(from,extent) release_region((from), (extent))
/*
* The following are not needed for the non-m68k ports
*/
#define ide_ack_intr(hwif) (1)
#define ide_release_lock(lock) do {} while (0)
#define ide_get_lock(lock, hdlr, data) do {} while (0)
#endif /* __KERNEL__ */
#endif /* __ASMi386_IDE_H */ #endif /* __ASM_PARISC_IDE_H */
#ifndef _ASM_IO_H
#define _ASM_IO_H
/* USE_HPPA_IOREMAP IS THE MAGIC FLAG TO ENABLE OR DISABLE REAL IOREMAP() FUNCTIONALITY */
/* FOR 712 or 715 MACHINES THIS SHOULD BE ENABLED,
NEWER MACHINES STILL HAVE SOME ISSUES IN THE SCSI AND/OR NETWORK DRIVERS AND
BECAUSE OF THAT I WILL LEAVE IT DISABLED FOR NOW <deller@gmx.de> */
/* WHEN THOSE ISSUES ARE SOLVED, USE_HPPA_IOREMAP WILL GO AWAY */
#define USE_HPPA_IOREMAP 0
#include <linux/config.h>
#include <linux/types.h>
#include <asm/gsc.h> #include <asm/pgtable.h>
#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
/* Memory mapped IO */
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
extern inline void * ioremap(unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, 0);
}
/*
* This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern inline void * ioremap_nocache (unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, _PAGE_NO_CACHE /* _PAGE_PCD */);
}
extern void iounmap(void *addr);
/*
* __raw_ variants have no defined meaning. on hppa, it means `i was
* too lazy to ioremap first'. kind of like isa_, except that there's
* no additional base address to add on.
*/
extern __inline__ unsigned char __raw_readb(unsigned long addr)
{
long flags;
unsigned char ret;
__asm__ __volatile__(
" rsm 2,%0\n"
" ldbx 0(%2),%1\n"
" mtsm %0\n"
: "=&r" (flags), "=r" (ret) : "r" (addr) );
return ret;
}
extern __inline__ unsigned short __raw_readw(unsigned long addr)
{
long flags;
unsigned short ret;
__asm__ __volatile__(
" rsm 2,%0\n"
" ldhx 0(%2),%1\n"
" mtsm %0\n"
: "=&r" (flags), "=r" (ret) : "r" (addr) );
return ret;
}
extern __inline__ unsigned int __raw_readl(unsigned long addr)
{
u32 ret;
__asm__ __volatile__(
" ldwax 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
return ret;
}
extern __inline__ unsigned long long __raw_readq(unsigned long addr)
{
unsigned long long ret;
#ifdef __LP64__
__asm__ __volatile__(
" ldda 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
#else
/* two reads may have side effects.. */
ret = ((u64) __raw_readl(addr)) << 32;
ret |= __raw_readl(addr+4);
#endif
return ret;
}
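[Editor's note: on 32-bit kernels the fallback above synthesizes a 64-bit value from two 32-bit loads, high word first (hence the side-effect caveat). The same arithmetic, demonstrated standalone on plain integers:]
#include <stdio.h>
#include <stdint.h>

/* compose a 64-bit value from two 32-bit halves, high word first */
static uint64_t compose64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("%#llx\n",
	       (unsigned long long)compose64(0xdeadbeefU, 0x12345678U));
	return 0;
}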
extern __inline__ void __raw_writeb(unsigned char val, unsigned long addr)
{
long flags;
__asm__ __volatile__(
" rsm 2,%0\n"
" stbs %1,0(%2)\n"
" mtsm %0\n"
: "=&r" (flags) : "r" (val), "r" (addr) );
}
extern __inline__ void __raw_writew(unsigned short val, unsigned long addr)
{
long flags;
__asm__ __volatile__(
" rsm 2,%0\n"
" sths %1,0(%2)\n"
" mtsm %0\n"
: "=&r" (flags) : "r" (val), "r" (addr) );
}
extern __inline__ void __raw_writel(unsigned int val, unsigned long addr)
{
__asm__ __volatile__(
" stwas %0,0(%1)\n"
: : "r" (val), "r" (addr) );
}
extern __inline__ void __raw_writeq(unsigned long long val, unsigned long addr)
{
#ifdef __LP64__
__asm__ __volatile__(
" stda %0,0(%1)\n"
: : "r" (val), "r" (addr) );
#else
/* two writes may have side effects.. */
__raw_writel(val >> 32, addr);
__raw_writel(val, addr+4);
#endif
}
#if USE_HPPA_IOREMAP
#define readb(addr) (*(volatile unsigned char *) (addr))
#define readw(addr) (*(volatile unsigned short *) (addr))
#define readl(addr) (*(volatile unsigned int *) (addr))
#define readq(addr) (*(volatile u64 *) (addr))
#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#define writeq(b,addr) (*(volatile u64 *) (addr) = (b))
#else /* !USE_HPPA_IOREMAP */
#define readb(addr) __raw_readb((unsigned long)(addr))
#define readw(addr) le16_to_cpu(__raw_readw((unsigned long)(addr)))
#define readl(addr) le32_to_cpu(__raw_readl((unsigned long)(addr)))
#define readq(addr) le64_to_cpu(__raw_readq((unsigned long)(addr)))
#define writeb(b,addr) __raw_writeb(b,(unsigned long)(addr))
#define writew(b,addr) __raw_writew(cpu_to_le16(b),(unsigned long)(addr))
#define writel(b,addr) __raw_writel(cpu_to_le32(b),(unsigned long)(addr))
#define writeq(b,addr) __raw_writeq(cpu_to_le64(b),(unsigned long)(addr))
#endif /* !USE_HPPA_IOREMAP */
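[Editor's note: readw/readl wrap the __raw_ accessors in le16_to_cpu/le32_to_cpu because PCI bus data is little-endian while PA-RISC is big-endian; the __raw_ forms skip the swap. A standalone sketch of what the 16-bit conversion amounts to on a big-endian CPU — le16_to_cpu_be is a hypothetical helper name, not a kernel symbol:]
#include <stdio.h>
#include <stdint.h>

/* what le16_to_cpu amounts to on a big-endian CPU such as PA-RISC
 * (on little-endian hosts the kernel version is a no-op) */
static uint16_t le16_to_cpu_be(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t raw = 0x3412; /* big-endian load of little-endian data */

	printf("raw=%#x cpu=%#x\n", raw, le16_to_cpu_be(raw));
	return 0;
}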
extern void memcpy_fromio(void *dest, unsigned long src, int count);
extern void memcpy_toio(unsigned long dest, const void *src, int count);
extern void memset_io(unsigned long dest, char fill, int count);
/* Support old drivers which don't ioremap.
* NB this interface is scheduled to disappear in 2.5
*/
#define EISA_BASE 0xfffffffffc000000UL
#define isa_readb(a) readb(EISA_BASE | (a))
#define isa_readw(a) readw(EISA_BASE | (a))
#define isa_readl(a) readl(EISA_BASE | (a))
#define isa_writeb(b,a) writeb((b), EISA_BASE | (a))
#define isa_writew(b,a) writew((b), EISA_BASE | (a))
#define isa_writel(b,a) writel((b), EISA_BASE | (a))
#define isa_memset_io(a,b,c) memset_io(EISA_BASE | (a), (b), (c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a), EISA_BASE | (b), (c))
#define isa_memcpy_toio(a,b,c) memcpy_toio(EISA_BASE | (a), (b), (c))
/*
* XXX - We don't have csum_partial_copy_fromio() yet, so we cheat here and
* just copy it. The net code will then do the checksum later. Presently
* only used by some shared memory 8390 Ethernet cards anyway.
*/
#define eth_io_copy_and_sum(skb,src,len,unused) \
memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(skb,src,len,unused) \
isa_memcpy_fromio((skb)->data,(src),(len))
/* Port-space IO */
#define inb_p inb
#define inw_p inw
#define inl_p inl
...@@ -17,42 +202,66 @@
#define outw_p outw
#define outl_p outl
#define readb gsc_readb
#define readw gsc_readw
#define readl gsc_readl
#define writeb gsc_writeb
#define writew gsc_writew
#define writel gsc_writel
#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
/*
* So we get clear link errors
*/
extern u8 inb(unsigned long addr);
extern u16 inw(unsigned long addr);
extern u32 inl(unsigned long addr);
extern void outb(unsigned char b, unsigned long addr);
extern void outw(unsigned short b, unsigned long addr);
extern void outl(u32 b, unsigned long addr);
static inline void memcpy_toio(void *dest, void *src, int count)
{
while(count--)
writeb(*((char *)src)++, (char *)dest++);
}
#endif
extern unsigned char eisa_in8(unsigned short port);
extern unsigned short eisa_in16(unsigned short port);
extern unsigned int eisa_in32(unsigned short port);
extern void eisa_out8(unsigned char data, unsigned short port);
extern void eisa_out16(unsigned short data, unsigned short port);
extern void eisa_out32(unsigned int data, unsigned short port);
#if defined(CONFIG_PCI)
extern unsigned char inb(int addr);
extern unsigned short inw(int addr);
extern unsigned int inl(int addr);
extern void outb(unsigned char b, int addr);
extern void outw(unsigned short b, int addr);
extern void outl(unsigned int b, int addr);
#elif defined(CONFIG_EISA)
#define inb eisa_in8
#define inw eisa_in16
#define inl eisa_in32
#define outb eisa_out8
#define outw eisa_out16
#define outl eisa_out32
#else
static inline char inb(unsigned long addr)
{
BUG();
return -1;
}
static inline short inw(unsigned long addr)
{
BUG();
return -1;
}
static inline int inl(unsigned long addr)
{
BUG();
return -1;
}
#define outb(x, y) BUG()
#define outw(x, y) BUG()
#define outl(x, y) BUG()
#endif
/*
* String versions of in/out ops:
*/
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
/* IO Port space is : BBiiii where BB is HBA number. */
#define IO_SPACE_LIMIT 0x00ffffff
/* Right now we don't support Dino-on-a-card and V class which do PCI MMIO
* through address/data registers. */
#define ioremap(__offset, __size) ((void *)(__offset))
#define iounmap(__addr)
#define dma_cache_inv(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while(0)
#define dma_cache_wback(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while (0)
...
...@@ -69,6 +69,10 @@
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460 /* Get exact space used by quota */
#define TIOCSTART 0x5461
#define TIOCSTOP 0x5462
#define TIOCSLTC 0x5462
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
...
...@@ -15,7 +15,7 @@
** fixup_irq is to initialize PCI IRQ line support and
** virtualize pcidev->irq value. To be called by pci_fixup_bus().
*/
extern void *iosapic_register(void *hpa); extern void *iosapic_register(unsigned long hpa);
extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
...
...@@ -2,10 +2,26 @@
#define __PARISC_IPCBUF_H__
/*
* The ipc64_perm structure for PA-RISC is identical to kern_ipc_perm * The ipc64_perm structure for PA-RISC is almost identical to
* as we have always had 32-bit UIDs and GIDs in the kernel. * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the kernel.
* 'seq' has been changed from long to int so that it's the same size
* on 64-bit kernels as on 32-bit ones.
*/
#define ipc64_perm kern_ipc_perm struct ipc64_perm
{
key_t key;
uid_t uid;
gid_t gid;
uid_t cuid;
gid_t cgid;
unsigned short int __pad1;
mode_t mode;
unsigned short int __pad2;
unsigned short int seq;
unsigned int __pad3;
unsigned long long int __unused1;
unsigned long long int __unused2;
};
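[Editor's note: with 'seq' shrunk to int-sized and the explicit padding above, the structure is meant to have the same layout under 32-bit and 64-bit compilation. A standalone sketch using fixed-width stand-ins for the kernel typedefs (widths assumed, illustrative only):]
#include <stdio.h>
#include <stddef.h>

/* fixed-width stand-ins for the kernel typedefs; widths assumed */
struct ipc64_perm_sketch {
	int key;
	unsigned int uid, gid, cuid, cgid;
	unsigned short pad1;
	unsigned short mode;
	unsigned short pad2;
	unsigned short seq;
	unsigned int pad3;
	unsigned long long unused1;
	unsigned long long unused2;
};

int main(void)
{
	/* 48 bytes with 'seq' at offset 26 on both ILP32 and LP64 */
	printf("sizeof=%zu seq@%zu\n",
	       sizeof(struct ipc64_perm_sketch),
	       offsetof(struct ipc64_perm_sketch, seq));
	return 0;
}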
#endif /* __PARISC_IPCBUF_H__ */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
#include <linux/string.h>
#include <asm/ptrace.h>
#include <linux/interrupt.h>
#include <asm/types.h>
/*
* linux/include/asm/irq.h * linux/include/asm-parisc/irq.h
*
* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar,
* Copyright 1999 SuSE GmbH
...@@ -16,33 +8,39 @@
* <tomsoft@informatik.tu-chemnitz.de>
*/
#ifndef _ASM_PARISC_IRQ_H
#define _ASM_PARISC_IRQ_H
#include <asm/ptrace.h>
#include <asm/types.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#define CPU_IRQ_REGION 1
#define TIMER_IRQ (IRQ_FROM_REGION(CPU_IRQ_REGION) | 0)
#define IPI_IRQ (IRQ_FROM_REGION(CPU_IRQ_REGION) | 1)
/* This should be 31 for PA1.1 binaries and 63 for PA-2.0 wide mode) */ /* This should be 31 for PA1.1 binaries and 63 for PA-2.0 wide mode */
#define MAX_CPU_IRQ (BITS_PER_LONG - 1)
#if 1 /* set to 1 to get the new irq offsets, or ... */
# if BITS_PER_LONG == 32
#  define IRQ_REGION_SHIFT 5
# else
#  define IRQ_REGION_SHIFT 6
# endif
#else /* 256 irq-entries per region (wastes memory, maybe gains speed? :-))*/
# define IRQ_REGION_SHIFT 8
#endif
#if BITS_PER_LONG == 32
# define IRQ_REGION_SHIFT 5
#else
# define IRQ_REGION_SHIFT 6
#endif
#define IRQ_PER_REGION (1 << IRQ_REGION_SHIFT)
#define NR_IRQ_REGS 8 #define NR_IRQ_REGS 16
#define NR_IRQS (NR_IRQ_REGS * IRQ_PER_REGION)
#define IRQ_REGION(irq) ((irq) >> IRQ_REGION_SHIFT)
#define IRQ_OFFSET(irq) ((irq) & ((1<<IRQ_REGION_SHIFT)-1))
#define IRQ_FROM_REGION(reg) ((reg) << IRQ_REGION_SHIFT)
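[Editor's note: an irq number simply encodes (region, offset) in disjoint bit fields. A standalone check of the arithmetic for the 32-bit case:]
#include <stdio.h>

#define IRQ_REGION_SHIFT 5 /* 32-bit case: 32 irqs per region */
#define IRQ_REGION(irq) ((irq) >> IRQ_REGION_SHIFT)
#define IRQ_OFFSET(irq) ((irq) & ((1 << IRQ_REGION_SHIFT) - 1))
#define IRQ_FROM_REGION(reg) ((reg) << IRQ_REGION_SHIFT)

int main(void)
{
	int irq = IRQ_FROM_REGION(2) | 7; /* region 2, line 7 -> irq 71 */

	printf("irq=%d region=%d offset=%d\n",
	       irq, IRQ_REGION(irq), IRQ_OFFSET(irq));
	return 0;
}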
#define IRQ_REG_DIS 1 /* support disable_irq / enable_irq */ #define EISA_IRQ_REGION 0 /* region 0 needs to be reserved for EISA */
#define IRQ_REG_MASK 2 /* require IRQs to be masked */ #define EISA_MAX_IRQS 16 /* max. (E)ISA irq line */
struct irq_region_ops {
void (*disable_irq)(void *dev, int irq);
...@@ -54,8 +52,8 @@ struct irq_region_ops {
struct irq_region_data {
void *dev;
const char *name;
unsigned flags;
int irqbase;
unsigned int status[IRQ_PER_REGION]; /* IRQ status */
};
struct irq_region {
...@@ -69,21 +67,31 @@ extern struct irq_region *irq_region[NR_IRQ_REGS];
static __inline__ int irq_cannonicalize(int irq)
{
#ifdef CONFIG_EISA
return (irq == (IRQ_FROM_REGION(EISA_IRQ_REGION)+2)
? (IRQ_FROM_REGION(EISA_IRQ_REGION)+9) : irq);
#else
return irq;
#endif
}
extern void disable_irq(int);
#define disable_irq_nosync(i) disable_irq(i)
extern void enable_irq(int);
extern void do_irq(struct irqaction *a, int i, struct pt_regs *p);
extern void do_irq_mask(unsigned long mask, struct irq_region *region,
struct pt_regs *regs);
extern struct irq_region *alloc_irq_region(int count, struct irq_region_ops *ops,
unsigned long flags, const char *name, void *dev); const char *name, void *dev);
extern int txn_alloc_irq(void);
extern int txn_claim_irq(int);
extern unsigned int txn_alloc_data(int, unsigned int);
extern unsigned long txn_alloc_addr(int);
#endif /* _ASM_IRQ_H */ /* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
#endif /* _ASM_PARISC_IRQ_H */
/*
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
* ---------------------------------------------------------------
* This file will be removed as soon as we have converted
* hp_psaux.c and hp_keyb.c to the input layer !
*
*/
/*
* linux/include/asm-parisc/keyboard.h
*
* Original by Geert Uytterhoeven
* updates by Alex deVries <adevries@thepuffingroup.com>
* portions copyright (1999) The Puffin Group
* mostly rewritten by Philipp Rumpf <prumpf@tux.org>,
* Copyright 2000 Philipp Rumpf
*/
/*
* We try to keep the amount of generic code as low as possible -
* we want to support all HIL, PS/2, and untranslated USB keyboards
*/
#ifndef _PARISC_KEYBOARD_H
#define _PARISC_KEYBOARD_H
#include <linux/config.h>
#ifdef __KERNEL__
#ifdef CONFIG_VT
#include <linux/kernel.h>
#include <linux/kd.h>
/* These are basically the generic functions / variables. The only
* unexpected detail is the initialization sequence for the keyboard
* driver is something like this:
*
* detect keyboard port
* detect keyboard
* call register_kbd_ops
* wait for init_hw
*
* only after init_hw has been called you're allowed to call
* handle_scancode. This means you either have to be extremely
* careful or use a global flag or something - I strongly suggest
* the latter. prumpf */
extern struct kbd_ops {
int (*setkeycode)(unsigned int, unsigned int);
int (*getkeycode)(unsigned int);
int (*translate)(unsigned char, unsigned char *, char);
char (*unexpected_up)(unsigned char);
void (*leds)(unsigned char);
void (*init_hw)(void);
/* Keyboard driver resource allocation */
void (*kbd_request_region)(void);
int (*kbd_request_irq)(void (*handler)(int, void *, struct pt_regs *));
/* Methods to access the keyboard processor's I/O registers */
unsigned char (*kbd_read_input)(void);
void (*kbd_write_output)(unsigned char val);
void (*kbd_write_command)(unsigned char val);
unsigned char (*kbd_read_status)(void);
unsigned char sysrq_key;
unsigned char *sysrq_xlate;
} *kbd_ops;
#define kbd_setkeycode (*kbd_ops->setkeycode)
#define kbd_getkeycode (*kbd_ops->getkeycode)
#define kbd_translate (*kbd_ops->translate)
#define kbd_unexpected_up (*kbd_ops->unexpected_up)
#define kbd_leds (*kbd_ops->leds)
#define kbd_init_hw (*kbd_ops->init_hw)
#define SYSRQ_KEY (kbd_ops->sysrq_key)
#define kbd_sysrq_xlate (kbd_ops->sysrq_xlate)
/* Do the actual calls via kbd_ops vector */
#define kbd_request_region() kbd_ops->kbd_request_region()
#define kbd_request_irq(handler) kbd_ops->kbd_request_irq(handler)
#define kbd_read_input() kbd_ops->kbd_read_input()
#define kbd_write_output(val) kbd_ops->kbd_write_output(val)
#define kbd_write_command(val) kbd_ops->kbd_write_command(val)
#define kbd_read_status() kbd_ops->kbd_read_status()
extern unsigned char hp_ps2kbd_sysrq_xlate[128]; /* from drivers/char/hp_keyb.c */
extern void unregister_kbd_ops(void);
extern void register_kbd_ops(struct kbd_ops *ops);
#endif /* CONFIG_VT */
#endif /* __KERNEL__ */
#endif /* _PARISC_KEYBOARD_H */
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
#include <linux/config.h>
#if CONFIG_DEBUG_HIGHMEM
# define D(n) __KM_FENCE_##n ,
#else
# define D(n)
#endif
enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
D(2) KM_SKB_DATA_SOFTIRQ,
D(3) KM_USER0,
D(4) KM_USER1,
D(5) KM_BIO_SRC_IRQ,
D(6) KM_BIO_DST_IRQ,
D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
D(11) KM_TYPE_NR
};
#undef D
#endif
#ifndef LED_H
#define LED_H
#define LED7 0x80 /* top (or furthest right) LED */
#define LED6 0x40
#define LED5 0x20
...@@ -16,18 +15,27 @@
#define LED_DISK_IO LED2 /* for disk activity */
#define LED_HEARTBEAT LED3 /* heartbeat */
/* values for pdc_chassis_lcd_info_ret_block.model: */
#define DISPLAY_MODEL_LCD 0 /* KittyHawk LED or LCD */
#define DISPLAY_MODEL_NONE 1 /* no LED or LCD */
#define DISPLAY_MODEL_LASI 2 /* LASI style 8 bit LED */
#define DISPLAY_MODEL_OLD_ASP 0x7F /* faked: ASP style 8 x 1 bit LED (only very old ASP versions) */
#define LED_CMD_REG_NONE NULL /* NULL == no addr for the cmd register */
/* irq function */ /* led tasklet struct */
extern void led_interrupt_func(void); extern struct tasklet_struct led_tasklet;
/* LASI & ASP specific LED initialization funcs */ /* register_led_driver() */
extern void __init lasi_led_init( unsigned long lasi_hpa ); int __init register_led_driver( int model, char *cmd_reg, char *data_reg );
extern void __init asp_led_init( unsigned long led_ptr );
/* registers the LED regions for procfs */
extern void __init register_led_regions(void); void __init register_led_regions(void);
/* writes a string to the LCD display (if possible on this h/w) */
int lcd_print(char *str);
/* main LED initialization function (uses the PDC) */ /* main LED initialization function (uses PDC) */
extern int __init led_init(void); int __init led_init(void);
#endif /* LED_H */
...@@ -4,6 +4,7 @@
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define MAP_SHARED 0x01 /* Share changes */
...
/*
* parisc mmu structures
*/
#ifndef _PARISC_MMU_H_
#define _PARISC_MMU_H_
#ifndef __ASSEMBLY__ /* On parisc, we store the space id here */
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
/* Hardware Page Table Entry */
typedef struct _PTE {
unsigned long v:1; /* Entry is valid */
unsigned long tag:31; /* Unique Tag */
unsigned long r:1; /* referenced */
unsigned long os_1:1; /* */
unsigned long t:1; /* page reference trap */
unsigned long d:1; /* dirty */
unsigned long b:1; /* break */
unsigned long type:3; /* access type */
unsigned long pl1:2; /* PL1 (execute) */
unsigned long pl2:2; /* PL2 (write) */
unsigned long u:1; /* uncacheable */
unsigned long id:1; /* access id */
unsigned long os_2:1; /* */
unsigned long os_3:3; /* */
unsigned long res_1:4; /* */
unsigned long phys:20; /* physical page number */
unsigned long os_4:2; /* */
unsigned long res_2:3; /* */
unsigned long next; /* pointer to next page */
} PTE;
/*
* Simulated two-level MMU. This structure is used by the kernel
* to keep track of MMU mappings and is used to update/maintain
* the hardware HASH table which is really a cache of mappings.
*
* The simulated structures mimic the hardware available on other
* platforms, notably the 80x86 and 680x0.
*/
typedef struct _pte {
unsigned long page_num:20;
unsigned long flags:12; /* Page flags (some unused bits) */
} pte;
#define PD_SHIFT (10+12) /* Page directory */
#define PD_MASK 0x02FF
#define PT_SHIFT (12) /* Page Table */
#define PT_MASK 0x02FF
#define PG_SHIFT (12) /* Page Entry */
/* MMU context */
typedef struct _MMU_context {
long pid[4];
pte **pmap; /* Two-level page-map structure */
} MMU_context;
#endif /* __ASSEMBLY__ */
#endif /* _PARISC_MMU_H_ */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
...@@ -14,17 +19,10 @@ extern void free_sid(unsigned long);
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
/* if (atomic_read(&mm->mm_users) != 1)
* Init_new_context can be called for a cloned mm, so we BUG();
* only allocate a space id if one hasn't been allocated
* yet AND mm != &init_mm (cloned kernel thread which
* will run in the kernel space with spaceid 0).
*/
if ((mm != &init_mm) && (mm->context == 0)) {
mm->context = alloc_sid();
}
return 0;
}
...@@ -35,15 +33,22 @@ destroy_context(struct mm_struct *mm)
mm->context = 0;
}
static inline void load_context(mm_context_t context)
{
mtsp(context, 3);
#if SPACEID_SHIFT == 0
mtctl(context << 1,8);
#else
mtctl(context >> (SPACEID_SHIFT - 1),8);
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
if (prev != next) {
/* Re-load page tables */ mtctl(__pa(next->pgd), 25);
tsk->thread.pg_tables = __pa(next->pgd); load_context(next->context);
mtctl(tsk->thread.pg_tables, 25);
mtsp(next->context,3);
}
}
...
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
struct node_map_data {
pg_data_t pg_data;
struct page *adj_node_mem_map;
};
extern struct node_map_data node_data[];
extern unsigned char *chunkmap;
#define BADCHUNK ((unsigned char)0xff)
#define CHUNKSZ (256*1024*1024)
#define CHUNKSHIFT 28
#define CHUNKMASK (~(CHUNKSZ - 1))
#define CHUNKNUM(paddr) ((paddr) >> CHUNKSHIFT)
#define NODE_DATA(nid) (&node_data[nid].pg_data)
#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
#define ADJ_NODE_MEM_MAP(nid) (node_data[nid].adj_node_mem_map)
#define phys_to_page(paddr) \
(ADJ_NODE_MEM_MAP(chunkmap[CHUNKNUM((paddr))]) \
+ ((paddr) >> PAGE_SHIFT))
#define virt_to_page(kvaddr) phys_to_page(__pa(kvaddr))
/* This is kind of bogus, need to investigate performance of doing it right */
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
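[Editor's note: phys_to_page() first maps a physical address to its 256 MB chunk, then through chunkmap[] to a node's mem_map. A toy standalone model of that lookup — all values invented:]
#include <stdio.h>

#define CHUNKSHIFT 28 /* 256 MB chunks, as above */
#define CHUNKNUM(paddr) ((unsigned long)(paddr) >> CHUNKSHIFT)

int main(void)
{
	/* invented 4-chunk map; 0xff plays the role of BADCHUNK */
	unsigned char chunkmap[4] = { 0, 0, 1, 0xff };
	unsigned long paddr = 0x24000000UL; /* inside chunk 2 */

	printf("chunk %lu -> node %u\n",
	       CHUNKNUM(paddr), chunkmap[CHUNKNUM(paddr)]);
	return 0;
}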
#endif /* !_PARISC_MMZONE_H */
#ifndef _ASM_PARISC_MODULE_H
#define _ASM_PARISC_MODULE_H
/*
* This file contains the parisc architecture specific module code.
*/
#define module_map(x) vmalloc(x)
#define module_unmap(x) vfree(x)
#define module_arch_init(x) (0)
#define arch_init_modules(x) do { } while (0)
#endif /* _ASM_PARISC_MODULE_H */
...@@ -13,11 +13,17 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifndef __LP64__
unsigned int __pad1;
#endif
__kernel_time_t msg_stime; /* last msgsnd time */
#ifndef __LP64__
unsigned int __pad2;
#endif
__kernel_time_t msg_rtime; /* last msgrcv time */
#ifndef __LP64__
unsigned int __pad3;
#endif
__kernel_time_t msg_ctime; /* last change time */
unsigned int msg_cbytes; /* current number of bytes on queue */
unsigned int msg_qnum; /* number of messages in queue */
...
...@@ -9,11 +9,31 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/cache.h>
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) #define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from))
struct page;
#define clear_user_page(page, vaddr) clear_page(page) extern void purge_kernel_dcache_page(unsigned long);
#define copy_user_page(to, from, vaddr) copy_page(to, from) extern void copy_user_page_asm(void *to, void *from);
extern void clear_user_page_asm(void *page, unsigned long vaddr);
static inline void
copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
{
copy_user_page_asm(vto, vfrom);
flush_kernel_dcache_page(vto);
/* XXX: ppc flushes icache too, should we? */
}
static inline void
clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
purge_kernel_dcache_page((unsigned long)page);
clear_user_page_asm(page, vaddr);
}
/*
* These are used to make use of C type-checking..
...@@ -47,6 +67,20 @@ extern __inline__ int get_order(unsigned long size)
return order;
}
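[Editor's note: the hunk above shows only the tail of get_order(). For reference, a standalone sketch of the usual implementation — smallest n with (PAGE_SIZE << n) >= size; not copied from this diff:]
#include <stdio.h>

#define PAGE_SHIFT 12

/* usual shape of get_order(); illustrative, not copied from this diff */
static int get_order(unsigned long size)
{
	int order = -1;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	/* 0 0 1 1 for one page, one page, just over a page, two pages */
	printf("%d %d %d %d\n", get_order(1), get_order(4096),
	       get_order(4097), get_order(8192));
	return 0;
}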
#ifdef __LP64__
#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
#else
#define MAX_PHYSMEM_RANGES 1 /* First range is only range that fits in 32 bits */
#endif
typedef struct __physmem_range {
unsigned long start_pfn;
unsigned long pages; /* PAGE_SIZE pages */
} physmem_range_t;
extern physmem_range_t pmem_ranges[];
extern int npmem_ranges;
#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
...@@ -68,7 +102,7 @@ extern __inline__ int get_order(unsigned long size)
#define LINUX_GATEWAY_SPACE 0
#define __PAGE_OFFSET (0xc0000000) #define __PAGE_OFFSET (0x10000000)
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
/* These macros don't work for 64-bit C code -- don't allow in C at all */
...@@ -78,8 +112,16 @@ extern __inline__ int get_order(unsigned long size)
#endif
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#ifndef CONFIG_DISCONTIGMEM
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT)) #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr) #define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#endif /* !CONFIG_DISCONTIGMEM */
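Since the kernel mapping is linear, __pa()/__va() are pure offset arithmetic against the new __PAGE_OFFSET. A stand-alone check mirroring the macros (user-space C; the address is illustrative):

#include <assert.h>

#define PAGE_OFFSET 0x10000000UL
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))

int main(void)
{
	unsigned long kaddr = 0x10400000UL;	/* a kernel virtual address */

	assert(__pa(kaddr) == 0x00400000UL);	/* physical = virtual - offset */
	assert((unsigned long)__va(__pa(kaddr)) == kaddr);	/* round trip */
	return 0;
}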
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
#ifndef _ASMPARISC_PARAM_H #ifndef _ASMPARISC_PARAM_H
#define _ASMPARISC_PARAM_H #define _ASMPARISC_PARAM_H
#ifdef __KERNEL__
# ifdef CONFIG_PA20
# define HZ 1000 /* Faster machines */
# else
# define HZ 100 /* Internal kernel timer frequency */
# endif
# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#endif
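Splitting HZ (1000 on PA 2.0) from USER_HZ (fixed at 100) means internal tick counts must be rescaled before being reported to user space. A sketch of the conversion, valid because both settings above make HZ a multiple of USER_HZ:

static inline unsigned long ticks_to_user_ticks(unsigned long ticks)
{
	/* 1000/100 = 10 internal ticks per reported tick on PA20 */
	return ticks / (HZ / USER_HZ);
}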
#ifndef HZ #ifndef HZ
#define HZ 100 #define HZ 100
#endif #endif
...@@ -17,8 +27,4 @@ ...@@ -17,8 +27,4 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */ #define MAXHOSTNAMELEN 64 /* max length of hostname */
#ifdef __KERNEL__
# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
#endif
#endif #endif
...@@ -3,9 +3,6 @@ ...@@ -3,9 +3,6 @@
#include <asm/scatterlist.h> #include <asm/scatterlist.h>
#define MIN_PCI_PORT 0x000000
#define MAX_PCI_PORT 0xffffff
/* /*
** HP PCI platforms generally support multiple bus adapters. ** HP PCI platforms generally support multiple bus adapters.
** (workstations 1-~4, servers 2-~32) ** (workstations 1-~4, servers 2-~32)
...@@ -19,7 +16,7 @@ ...@@ -19,7 +16,7 @@
#define PCI_MAX_BUSSES 256 #define PCI_MAX_BUSSES 256
/* [soapbox on] /* [soapbox on]
** Who the hell can develope stuff without ASSERT or VASSERT? ** Who the hell can develop stuff without ASSERT or VASSERT?
** No one understands all the modules across all platforms. ** No one understands all the modules across all platforms.
** For linux add another dimension - processor architectures. ** For linux add another dimension - processor architectures.
** **
...@@ -49,18 +46,40 @@ ...@@ -49,18 +46,40 @@
** Data needed by pcibios layer belongs here. ** Data needed by pcibios layer belongs here.
*/ */
struct pci_hba_data { struct pci_hba_data {
struct pci_hba_data *next; /* global chain of HBAs */ unsigned long base_addr; /* aka Host Physical Address */
char *base_addr; /* aka Host Physical Address */ const struct parisc_device *dev; /* device from PA bus walk */
struct hp_device *iodc_info; /* Info from PA bus walk */
struct pci_bus *hba_bus; /* primary PCI bus below HBA */ struct pci_bus *hba_bus; /* primary PCI bus below HBA */
int hba_num; /* I/O port space access "key" */ int hba_num; /* I/O port space access "key" */
struct resource bus_num; /* PCI bus numbers */ struct resource bus_num; /* PCI bus numbers */
struct resource io_space; /* PIOP */ struct resource io_space; /* PIOP */
struct resource mem_space; /* LMMIO */ struct resource lmmio_space; /* bus addresses < 4Gb */
unsigned long mem_space_offset; /* VCLASS support */ struct resource elmmio_space; /* additional bus addresses < 4Gb */
unsigned long lmmio_space_offset; /* CPU view - PCI view */
void * iommu; /* IOMMU this device is under */
/* REVISIT - spinlock to protect resources? */ /* REVISIT - spinlock to protect resources? */
}; };
#define HBA_DATA(d) ((struct pci_hba_data *) (d))
/*
** We support 2^16 I/O ports per HBA. These are set up in the form
** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port
** space address.
*/
#define HBA_PORT_SPACE_BITS 16
#define HBA_PORT_BASE(h) ((h) << HBA_PORT_SPACE_BITS)
#define HBA_PORT_SPACE_SIZE (1UL << HBA_PORT_SPACE_BITS)
#define PCI_PORT_HBA(a) ((a) >> HBA_PORT_SPACE_BITS)
#define PCI_PORT_ADDR(a) ((a) & (HBA_PORT_SPACE_SIZE - 1))
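So an I/O port address carries the HBA number in its upper bits and the 16-bit port in the low bits. A stand-alone check of the encoding (user-space C; the port and HBA values are made up):

#include <assert.h>

#define HBA_PORT_SPACE_BITS	16
#define HBA_PORT_BASE(h)	((h) << HBA_PORT_SPACE_BITS)
#define HBA_PORT_SPACE_SIZE	(1UL << HBA_PORT_SPACE_BITS)
#define PCI_PORT_HBA(a)		((a) >> HBA_PORT_SPACE_BITS)
#define PCI_PORT_ADDR(a)	((a) & (HBA_PORT_SPACE_SIZE - 1))

int main(void)
{
	unsigned long a = HBA_PORT_BASE(0x02UL) + 0x3456;	/* 0x023456 */

	assert(PCI_PORT_HBA(a) == 0x02);	/* bus adapter "key" */
	assert(PCI_PORT_ADDR(a) == 0x3456);	/* port within the HBA */
	return 0;
}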
/*
** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses.
** Note that we currently support only LMMIO.
*/
#define PCI_BUS_ADDR(hba,a) ((a) - hba->lmmio_space_offset)
#define PCI_HOST_ADDR(hba,a) ((a) + hba->lmmio_space_offset)
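The LMMIO translation is likewise a fixed per-HBA offset. A minimal sketch with a stand-in struct (only the lmmio_space_offset field comes from this patch; the offset and addresses are made up):

#include <assert.h>

struct hba_sketch {
	unsigned long lmmio_space_offset;	/* CPU view - PCI view */
};

#define PCI_BUS_ADDR(hba, a)	((a) - (hba)->lmmio_space_offset)
#define PCI_HOST_ADDR(hba, a)	((a) + (hba)->lmmio_space_offset)

int main(void)
{
	struct hba_sketch hba = { 0x04000000UL };
	unsigned long cpu_addr = 0xf4001000UL;		/* PA_VIEW */
	unsigned long bus_addr = PCI_BUS_ADDR(&hba, cpu_addr);

	assert(bus_addr == 0xf0001000UL);		/* IO_VIEW */
	assert(PCI_HOST_ADDR(&hba, bus_addr) == cpu_addr);
	return 0;
}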
/* /*
** KLUGE: linux/pci.h includes asm/pci.h BEFORE declaring struct pci_bus ** KLUGE: linux/pci.h includes asm/pci.h BEFORE declaring struct pci_bus
...@@ -69,6 +88,12 @@ struct pci_hba_data { ...@@ -69,6 +88,12 @@ struct pci_hba_data {
struct pci_bus; struct pci_bus;
struct pci_dev; struct pci_dev;
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
/* /*
** Most PCI devices (eg Tulip, NCR720) also export the same registers ** Most PCI devices (eg Tulip, NCR720) also export the same registers
** to both MMIO and I/O port space. Due to poor performance of I/O Port ** to both MMIO and I/O port space. Due to poor performance of I/O Port
...@@ -106,9 +131,6 @@ struct pci_bios_ops { ...@@ -106,9 +131,6 @@ struct pci_bios_ops {
void (*fixup_bus)(struct pci_bus *bus); void (*fixup_bus)(struct pci_bus *bus);
}; };
extern void pcibios_size_bridge(struct pci_bus *, struct pbus_set_ranges_data *);
/* /*
** See Documentation/DMA-mapping.txt ** See Documentation/DMA-mapping.txt
*/ */
...@@ -127,8 +149,8 @@ struct pci_dma_ops { ...@@ -127,8 +149,8 @@ struct pci_dma_ops {
/* /*
** We could live without the hppa_dma_ops indirection if we didn't want ** We could live without the hppa_dma_ops indirection if we didn't want
** to support 4 different dma models with one binary or they were ** to support 4 different coherent dma models with one binary (they will
** all loadable modules: ** someday be loadable modules):
** I/O MMU consistent method dma_sync behavior ** I/O MMU consistent method dma_sync behavior
** ============= ====================== ======================= ** ============= ====================== =======================
** a) PA-7x00LC uncachable host memory flush/purge ** a) PA-7x00LC uncachable host memory flush/purge
...@@ -144,8 +166,11 @@ struct pci_dma_ops { ...@@ -144,8 +166,11 @@ struct pci_dma_ops {
*/ */
extern struct pci_dma_ops *hppa_dma_ops; extern struct pci_dma_ops *hppa_dma_ops;
#ifdef CONFIG_PA11
extern struct pci_dma_ops pcxl_dma_ops; extern struct pci_dma_ops pcxl_dma_ops;
extern struct pci_dma_ops pcx_dma_ops; extern struct pci_dma_ops pcx_dma_ops;
#endif
/* /*
** Oops hard if we haven't setup hppa_dma_ops by the time the first driver ** Oops hard if we haven't setup hppa_dma_ops by the time the first driver
...@@ -155,7 +180,9 @@ extern struct pci_dma_ops pcx_dma_ops; ...@@ -155,7 +180,9 @@ extern struct pci_dma_ops pcx_dma_ops;
*/ */
static inline int pci_dma_panic(char *msg) static inline int pci_dma_panic(char *msg)
{ {
extern void panic(const char *, ...); /* linux/kernel.h */
panic(msg); panic(msg);
/* NOTREACHED */
return -1; return -1;
} }
...@@ -196,16 +223,32 @@ static inline int pci_dma_panic(char *msg) ...@@ -196,16 +223,32 @@ static inline int pci_dma_panic(char *msg)
hppa_dma_ops->dma_sync_sg(p, sg, n, d); \ hppa_dma_ops->dma_sync_sg(p, sg, n, d); \
} }
/* No highmem on parisc, plus we have an IOMMU, so mapping pages is easy. */
#define pci_map_page(dev, page, off, size, dir) \
pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)
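A driver-side sketch of the page-based mapping under the pci_* DMA API this header implements (kernel context; the function and parameter names are illustrative):

static void example_dma(struct pci_dev *pdev, struct page *pg,
			unsigned long off, size_t len)
{
	dma_addr_t handle = pci_map_page(pdev, pg, off, len, PCI_DMA_TODEVICE);

	/* ... program the device with 'handle', wait for completion ... */

	pci_unmap_page(pdev, handle, len, PCI_DMA_TODEVICE);
}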
/* Don't support DAC yet. */
#define pci_dac_dma_supported(pci_dev, mask) (0)
/* /*
** Stuff declared in arch/parisc/kernel/pci.c ** Stuff declared in arch/parisc/kernel/pci.c
*/ */
extern struct pci_port_ops *pci_port; extern struct pci_port_ops *pci_port;
extern struct pci_bios_ops *pci_bios; extern struct pci_bios_ops *pci_bios;
extern int pci_post_reset_delay; /* delay after de-asserting #RESET */ extern int pci_post_reset_delay; /* delay after de-asserting #RESET */
extern int pci_hba_count;
extern struct pci_hba_data *parisc_pci_hba[];
#ifdef CONFIG_PCI
extern void pcibios_register_hba(struct pci_hba_data *); extern void pcibios_register_hba(struct pci_hba_data *);
extern void pcibios_set_master(struct pci_dev *);
extern void pcibios_assign_unassigned_resources(struct pci_bus *); extern void pcibios_assign_unassigned_resources(struct pci_bus *);
#else
extern inline void pcibios_register_hba(struct pci_hba_data *x)
{
}
#endif
/* /*
** used by drivers/pci/pci.c:pci_do_scan_bus() ** used by drivers/pci/pci.c:pci_do_scan_bus()
...@@ -216,12 +259,7 @@ extern void pcibios_assign_unassigned_resources(struct pci_bus *); ...@@ -216,12 +259,7 @@ extern void pcibios_assign_unassigned_resources(struct pci_bus *);
** To date, only alpha sets this to one. We'll need to set this ** To date, only alpha sets this to one. We'll need to set this
** to zero for legacy platforms and one for PAT platforms. ** to zero for legacy platforms and one for PAT platforms.
*/ */
#ifdef __LP64__ #define pcibios_assign_all_busses() (pdc_type == PDC_TYPE_PAT)
extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
#define pcibios_assign_all_busses() pdc_pat
#else
#define pcibios_assign_all_busses() 0
#endif
#define PCIBIOS_MIN_IO 0x10 #define PCIBIOS_MIN_IO 0x10
#define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */ #define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */
...@@ -229,4 +267,32 @@ extern int pdc_pat; /* arch/parisc/kernel/inventory.c */ ...@@ -229,4 +267,32 @@ extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
/* Return the index of the PCI controller for device PDEV. */ /* Return the index of the PCI controller for device PDEV. */
#define pci_controller_num(PDEV) (0) #define pci_controller_num(PDEV) (0)
#define GET_IOC(dev) ((struct ioc *)(HBA_DATA(dev->sysdata)->iommu))
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void * ccio_get_iommu(const struct parisc_device *dev);
struct pci_dev * ccio_get_fake(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
struct resource *res, unsigned long size,
unsigned long min, unsigned long max, unsigned long align,
void (*alignf)(void *, struct resource *, unsigned long, unsigned long),
void *alignf_data);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_get_fake(dev) NULL
#define ccio_request_resource(dev, res) request_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align, alignf, data) \
allocate_resource(&iomem_resource, res, size, min, max, \
align, alignf, data)
#endif /* !CONFIG_IOMMU_CCIO */
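Because the !CONFIG_IOMMU_CCIO stubs fall back to the generic resource tree, call sites stay config-independent. A sketch of a probe path using them (kernel context; only ccio_request_resource() is from this patch):

static int example_claim_mmio(const struct parisc_device *dev,
			      struct resource *res)
{
	/* routes through the CCIO IOMMU when configured, else plain
	 * request_resource(&iomem_resource, res) via the stub above */
	return ccio_request_resource(dev, res);
}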
#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void * sba_get_iommu(struct parisc_device *dev);
#endif
#endif /* __ASM_PARISC_PCI_H */ #endif /* __ASM_PARISC_PCI_H */
#ifndef _PARISC_PERCPU_H
#define _PARISC_PERCPU_H
#include <asm-generic/percpu.h>
#endif
...@@ -19,10 +19,17 @@ typedef int __kernel_suseconds_t; ...@@ -19,10 +19,17 @@ typedef int __kernel_suseconds_t;
typedef int __kernel_clock_t; typedef int __kernel_clock_t;
typedef int __kernel_daddr_t; typedef int __kernel_daddr_t;
/* Note these change from narrow to wide kernels */ /* Note these change from narrow to wide kernels */
#ifdef __LP64__
typedef unsigned long __kernel_size_t; typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t; typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t; typedef long __kernel_ptrdiff_t;
typedef long __kernel_time_t; typedef long __kernel_time_t;
#else
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef int __kernel_time_t;
#endif
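The LP64 split keeps these POSIX types at the natural register width on wide kernels and 32 bits on narrow ones. A compile-time check of that intent (_Static_assert is a modern stand-in used for illustration, not part of this patch):

#ifdef __LP64__
_Static_assert(sizeof(unsigned long) == 8, "wide kernel: 64-bit __kernel_size_t");
#else
_Static_assert(sizeof(unsigned int) == 4, "narrow kernel: 32-bit __kernel_size_t");
#endif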
typedef char * __kernel_caddr_t; typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t; typedef unsigned short __kernel_uid16_t;
...@@ -44,6 +51,10 @@ typedef struct { ...@@ -44,6 +51,10 @@ typedef struct {
#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ #endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
} __kernel_fsid_t; } __kernel_fsid_t;
/* compatibility stuff */
typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;
#if defined(__KERNEL__) && defined(__LP64__) #if defined(__KERNEL__) && defined(__LP64__)
/* Now 32bit compatibility types */ /* Now 32bit compatibility types */
typedef unsigned int __kernel_dev_t32; typedef unsigned int __kernel_dev_t32;
......
...@@ -5,5 +5,8 @@ ...@@ -5,5 +5,8 @@
/* declared in arch/parisc/kernel/setup.c */ /* declared in arch/parisc/kernel/setup.c */
extern struct proc_dir_entry * proc_runway_root; extern struct proc_dir_entry * proc_runway_root;
#define RUNWAY_STATUS 0x10
#define RUNWAY_DEBUG 0x40
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* ASM_PARISC_RUNWAY_H */ #endif /* ASM_PARISC_RUNWAY_H */
#include <asm-generic/xor.h>