Commit f5e706ad authored by Sam Ravnborg, committed by David S. Miller

sparc: join the remaining header files

With this commit all remaining sparc64 header files are moved to asm-sparc.
These 71 files were too different to be merged trivially, so each is split
into a _32.h and a _64.h file, both of which are included from a common
file without a bit-size suffix.

The following script was used:
cd include
FILES=`wc -l asm-sparc64/*h | grep -v '^     1' | cut -b 20-`

for FILE in ${FILES}; do
  echo $FILE:
  BASE=`echo $FILE | cut -d '.' -f 1`
  FN32=${BASE}_32.h
  FN64=${BASE}_64.h
  GUARD=___ASM_SPARC_`echo $BASE | tr '-' '_' | tr [:lower:] [:upper:]`_H
  git mv asm-sparc/$FILE asm-sparc/$FN32
  git mv asm-sparc64/$FILE asm-sparc/$FN64
  echo git mv done
  printf "#ifndef %s\n" $GUARD                             >   asm-sparc/$FILE
  printf "#define %s\n" $GUARD                             >>  asm-sparc/$FILE
  printf "#if defined(__sparc__) && defined(__arch64__)\n" >>  asm-sparc/$FILE
  printf "#include <asm-sparc/%s>\n" $FN64                 >>  asm-sparc/$FILE
  printf "#else\n"                                         >>  asm-sparc/$FILE
  printf "#include <asm-sparc/%s>\n" $FN32                 >>  asm-sparc/$FILE
  printf "#endif\n"                                        >>  asm-sparc/$FILE
  printf "#endif\n"                                        >>  asm-sparc/$FILE
  git add asm-sparc/$FILE
  echo new file done
  printf "#include <asm-sparc/%s>\n" $FILE                 >  asm-sparc64/$FILE
  git add asm-sparc64/$FILE
  echo sparc64 file done
done

The guard contains three '_' characters to avoid conflicts with existing guards.
In addition, the two Kbuild files are emptied to avoid breaking the
headers_* targets.
We will reintroduce the exported header files when the necessary
kbuild changes are merged.
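
As generated by the script above, e.g. asm-sparc/atomic.h now reads:

#ifndef ___ASM_SPARC_ATOMIC_H
#define ___ASM_SPARC_ATOMIC_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/atomic_64.h>
#else
#include <asm-sparc/atomic_32.h>
#endif
#endif

while the compatibility file asm-sparc64/atomic.h is reduced to the
single line "#include <asm-sparc/atomic.h>".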
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5e3609f6
include include/asm-generic/Kbuild.asm
header-y += apc.h
header-y += asi.h
header-y += bpp.h
header-y += display7seg.h
header-y += envctrl.h
header-y += jsflash.h
header-y += openprom.h
header-y += openpromio.h
header-y += psrcompat.h
header-y += pstate.h
header-y += reg.h
header-y += traps.h
header-y += uctx.h
header-y += utrap.h
header-y += vfc_ioctls.h
header-y += watchdog.h
unifdef-y += fbio.h
unifdef-y += perfctr.h
unifdef-y += psr.h
# dummy file to avoid breaking make headers_install
/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
#define atomic_read(v) ((v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* This is the old 24-bit implementation. It's still used internally
* by some sparc-specific code, notably the semaphore implementation.
*/
typedef struct { volatile int counter; } atomic24_t;
#ifndef CONFIG_SMP
#define ATOMIC24_INIT(i) { (i) }
#define atomic24_read(v) ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = i)
#else
/* We do the bulk of the actual work out of line in two common
* routines in assembler, see arch/sparc/lib/atomic.S for the
* "fun" details.
*
* For SMP the trick is you embed the spin lock byte within
* the word, use the low byte so signedness is easily retained
* via a quick arithmetic shift. It looks like this:
*
* ----------------------------------------
* | signed 24-bit counter value | lock | atomic_t
* ----------------------------------------
* 31 8 7 0
*/
#define ATOMIC24_INIT(i) { ((i) << 8) }
static inline int atomic24_read(const atomic24_t *v)
{
int ret = v->counter;
while(ret & 0xff)
ret = v->counter;
return ret >> 8;
}
#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
#endif
static inline int __atomic24_add(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_add\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))
#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* !(__KERNEL__) */
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */
#ifndef ___ASM_SPARC_ATOMIC_H
#define ___ASM_SPARC_ATOMIC_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/atomic_64.h>
#else
#include <asm-sparc/atomic_32.h>
#endif
#endif
/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
#define atomic_read(v) ((v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* This is the old 24-bit implementation. It's still used internally
* by some sparc-specific code, notably the semaphore implementation.
*/
typedef struct { volatile int counter; } atomic24_t;
#ifndef CONFIG_SMP
#define ATOMIC24_INIT(i) { (i) }
#define atomic24_read(v) ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = i)
#else
/* We do the bulk of the actual work out of line in two common
* routines in assembler, see arch/sparc/lib/atomic.S for the
* "fun" details.
*
* For SMP the trick is you embed the spin lock byte within
* the word, use the low byte so signedness is easily retained
* via a quick arithmetic shift. It looks like this:
*
* ----------------------------------------
* | signed 24-bit counter value | lock | atomic_t
* ----------------------------------------
* 31 8 7 0
*/
#define ATOMIC24_INIT(i) { ((i) << 8) }
static inline int atomic24_read(const atomic24_t *v)
{
int ret = v->counter;
while(ret & 0xff)
ret = v->counter;
return ret >> 8;
}
#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
#endif
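/* Illustration (not original text): with the SMP encoding above,
 * ATOMIC24_INIT(5) stores 5 << 8 = 0x500 in the counter word; the low
 * lock byte is zero when unlocked, so atomic24_read() spins until the
 * lock byte clears and then arithmetically shifts the value back:
 *
 *	atomic24_t a = ATOMIC24_INIT(5);	// counter = 0x00000500
 *	atomic24_read(&a);			// 0x500 >> 8 == 5
 *
 * The arithmetic shift preserves the sign of negative 24-bit counts.
 */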
static inline int __atomic24_add(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_add\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
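/* Note (illustration, not original text): the register variables pin
 * the arguments to the fixed global registers (%g1 = pointer,
 * %g2 = increment) used by the out-of-line helpers in
 * arch/sparc/lib/atomic.S; %g3, %g4 and %g7 appear as outputs only so
 * the compiler treats them as clobbered across the call.
 */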
static inline int __atomic24_sub(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))
#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* !(__KERNEL__) */
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */
/* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
* Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_ATOMIC__
#define __ARCH_SPARC64_ATOMIC__
#include <linux/types.h>
#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
#define atomic64_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
extern void atomic_add(int, atomic_t *);
extern void atomic64_add(int, atomic64_t *);
extern void atomic_sub(int, atomic_t *);
extern void atomic64_sub(int, atomic64_t *);
extern int atomic_add_ret(int, atomic_t *);
extern int atomic64_add_ret(int, atomic64_t *);
extern int atomic_sub_ret(int, atomic_t *);
extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic_dec_return(v) atomic_sub_ret(1, v)
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
#define atomic_inc_return(v) atomic_add_ret(1, v)
#define atomic64_inc_return(v) atomic64_add_ret(1, v)
#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
#define atomic_add_return(i, v) atomic_add_ret(i, v)
#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
#define atomic_inc(v) atomic_add(1, v)
#define atomic64_inc(v) atomic64_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
#define atomic64_dec(v) atomic64_sub(1, v)
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
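/* Illustration (not original text): atomic_add_unless() loops on
 * atomic_cmpxchg() until it either observes the excluded value u and
 * gives up (returning 0) or successfully installs c + a (returning 1).
 * atomic_inc_not_zero() uses this to take a reference only while the
 * count is still non-zero:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object is already on its way out
 *
 * (obj/refcnt are hypothetical names.)
 */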
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec() membar_storeload_loadload();
#define smp_mb__after_atomic_dec() membar_storeload_storestore();
#define smp_mb__before_atomic_inc() membar_storeload_loadload();
#define smp_mb__after_atomic_inc() membar_storeload_storestore();
#else
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
/*
* auxio.h: Definitions and code for the Auxiliary I/O register.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_AUXIO_H
#define _SPARC_AUXIO_H
#include <asm/system.h>
#include <asm/vaddrs.h>
/* This register is an unsigned char in IO space. It does two things.
* First, it is used to control the front panel LED light on machines
* that have it (good for testing entry points to trap handlers and irq's)
* Secondly, it controls various floppy drive parameters.
*/
#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
/* Set the following to zero to eject the floppy. */
#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
#ifndef __ASSEMBLY__
/*
* NOTE: these routines are implementation dependent--
* understand the hardware you are querying!
*/
extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
/*
* The following routines are provided for driver-compatibility
* with sparc64 (primarily sunlance.c)
*/
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
#define auxio_set_lte(on) \
do { \
if(on) { \
set_auxio(AUXIO_LINK_TEST, 0); \
} else { \
set_auxio(0, AUXIO_LINK_TEST); \
} \
} while (0)
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
#define auxio_set_led(on) \
do { \
if(on) { \
set_auxio(AUXIO_LED, 0); \
} else { \
set_auxio(0, AUXIO_LED); \
} \
} while (0)
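/* Usage sketch (illustration only):
 *
 *	auxio_set_led(AUXIO_LED_ON);	// light the front panel LED
 *	auxio_set_lte(AUXIO_LTE_OFF);	// disable the TPE link test
 */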
#endif /* !(__ASSEMBLY__) */
/* AUXIO2 (Power Off Control) */
extern __volatile__ unsigned char * auxio_power_register;
#define AUXIO_POWER_DETECT_FAILURE 32
#define AUXIO_POWER_CLEAR_FAILURE 2
#define AUXIO_POWER_OFF 1
#endif /* !(_SPARC_AUXIO_H) */
#ifndef ___ASM_SPARC_AUXIO_H
#define ___ASM_SPARC_AUXIO_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/auxio_64.h>
#else
#include <asm-sparc/auxio_32.h>
#endif
#endif
/*
* auxio.h: Definitions and code for the Auxiliary I/O register.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_AUXIO_H
#define _SPARC_AUXIO_H
#include <asm/system.h>
#include <asm/vaddrs.h>
/* This register is an unsigned char in IO space. It does two things.
* First, it is used to control the front panel LED light on machines
* that have it (good for testing entry points to trap handlers and irq's)
* Secondly, it controls various floppy drive parameters.
*/
#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
/* Set the following to zero to eject the floppy. */
#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
#ifndef __ASSEMBLY__
/*
* NOTE: these routines are implementation dependent--
* understand the hardware you are querying!
*/
extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
/*
* The following routines are provided for driver-compatibility
* with sparc64 (primarily sunlance.c)
*/
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
#define auxio_set_lte(on) \
do { \
if(on) { \
set_auxio(AUXIO_LINK_TEST, 0); \
} else { \
set_auxio(0, AUXIO_LINK_TEST); \
} \
} while (0)
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
#define auxio_set_led(on) \
do { \
if(on) { \
set_auxio(AUXIO_LED, 0); \
} else { \
set_auxio(0, AUXIO_LED); \
} \
} while (0)
#endif /* !(__ASSEMBLY__) */
/* AUXIO2 (Power Off Control) */
extern __volatile__ unsigned char * auxio_power_register;
#define AUXIO_POWER_DETECT_FAILURE 32
#define AUXIO_POWER_CLEAR_FAILURE 2
#define AUXIO_POWER_OFF 1
#endif /* !(_SPARC_AUXIO_H) */
/*
* auxio.h: Definitions and code for the Auxiliary I/O registers.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
*/
#ifndef _SPARC64_AUXIO_H
#define _SPARC64_AUXIO_H
/* AUXIO implementations:
* sbus-based NCR89C105 "Slavio"
* LED/Floppy (AUX1) register
* Power (AUX2) register
*
* ebus-based auxio on PCIO
* LED Auxio Register
* Power Auxio Register
*
* Register definitions from NCR _NCR89C105 Chip Specification_
*
* SLAVIO AUX1 @ 0x1900000
* -------------------------------------------------
* | (R) | (R) | D | (R) | E | M | T | L |
* -------------------------------------------------
* (R) - bit 7:6,4 are reserved and should be masked in s/w
* D - Floppy Density Sense (1=high density) R/O
* E - Link Test Enable, directly reflected on AT&T 7213 LTE pin
* M - Monitor/Mouse Mux, directly reflected on MON_MSE_MUX pin
* T - Terminal Count: sends TC pulse to 82077 floppy controller
* L - System LED on front panel (0=off, 1=on)
*/
#define AUXIO_AUX1_MASK 0xc0 /* Mask bits */
#define AUXIO_AUX1_FDENS 0x20 /* Floppy Density Sense */
#define AUXIO_AUX1_LTE 0x08 /* Link Test Enable */
#define AUXIO_AUX1_MMUX 0x04 /* Monitor/Mouse Mux */
#define AUXIO_AUX1_FTCNT 0x02 /* Terminal Count, */
#define AUXIO_AUX1_LED 0x01 /* System LED */
/* SLAVIO AUX2 @ 0x1910000
* -------------------------------------------------
* | (R) | (R) | D | (R) | (R) | (R) | C | F |
* -------------------------------------------------
* (R) - bits 7:6,4:2 are reserved and should be masked in s/w
* D - Power Failure Detect (1=power fail)
* C - Clear Power Failure Detect Int (1=clear)
* F - Power Off (1=power off)
*/
#define AUXIO_AUX2_MASK 0xdc /* Mask Bits */
#define AUXIO_AUX2_PFAILDET 0x20 /* Power Fail Detect */
#define AUXIO_AUX2_PFAILCLR 0x02 /* Clear Pwr Fail Det Intr */
#define AUXIO_AUX2_PWR_OFF 0x01 /* Power Off */
/* Register definitions from Sun Microsystems _PCIO_ p/n 802-7837
*
* PCIO LED Auxio @ 0x726000
* -------------------------------------------------
* | 31:1 Unused | LED |
* -------------------------------------------------
* Bits 31:1 unused
* LED - System LED on front panel (0=off, 1=on)
*/
#define AUXIO_PCIO_LED 0x01 /* System LED */
/* PCIO Power Auxio @ 0x724000
* -------------------------------------------------
* | 31:2 Unused | CPO | SPO |
* -------------------------------------------------
* Bits 31:2 unused
* CPO - Courtesy Power Off (1=off)
* SPO - System Power Off (1=off)
*/
#define AUXIO_PCIO_CPWR_OFF 0x02 /* Courtesy Power Off */
#define AUXIO_PCIO_SPWR_OFF 0x01 /* System Power Off */
#ifndef __ASSEMBLY__
extern void __iomem *auxio_register;
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
extern void auxio_set_lte(int on);
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
extern void auxio_set_led(int on);
#endif /* ifndef __ASSEMBLY__ */
#endif /* !(_SPARC64_AUXIO_H) */
/*
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright 2001 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___set_bit(ADDR, mask) != 0;
}
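/* Worked example (illustration only): for nr = 35 the word index is
 * 35 >> 5 = 1 and the mask is 1 << (35 & 31) = 0x8, so bit 3 of the
 * second 32-bit word is set atomically; the return value is non-zero
 * iff that bit was already set.
 */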
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC_BITOPS_H) */
#ifndef ___ASM_SPARC_BITOPS_H
#define ___ASM_SPARC_BITOPS_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/bitops_64.h>
#else
#include <asm-sparc/bitops_32.h>
#endif
#endif
/*
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright 2001 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___set_bit(ADDR, mask) != 0;
}
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC_BITOPS_H) */
/*
* bitops.h: Bit string operations on the V9.
*
* Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC64_BITOPS_H
#define _SPARC64_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/byteorder.h>
extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
extern void set_bit(unsigned long nr, volatile unsigned long *addr);
extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#include <asm-generic/bitops/non-atomic.h>
#ifdef CONFIG_SMP
#define smp_mb__before_clear_bit() membar_storeload_loadload()
#define smp_mb__after_clear_bit() membar_storeload_storestore()
#else
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
#endif
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
#ifdef ULTRA_HAS_POPULATION_COUNT
static inline unsigned int hweight64(unsigned long w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
return res;
}
static inline unsigned int hweight32(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
return res;
}
static inline unsigned int hweight16(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
return res;
}
static inline unsigned int hweight8(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
return res;
}
#else
#include <asm-generic/bitops/hweight.h>
#endif
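/* Illustration (not original text): "popc" returns the number of set
 * bits in the source register, so hweight8(0xf0) == 4 and
 * hweight32(0xffffffff) == 32; the masks above restrict the count to
 * the low 8, 16 or 32 bits of the 64-bit register.
 */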
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
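/* Illustration (not original text): "^ 0x38" inverts bits 3..5 of the
 * bit number, i.e. the byte index within a 64-bit word, mapping
 * little-endian ext2 bitmap bit numbers onto this big-endian layout
 * (bit 0 -> bit 56, bit 8 -> bit 48, ..., bit 56 -> bit 0).
 */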
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC64_BITOPS_H) */
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <linux/mm.h> /* Common for other includes */
// #include <linux/kernel.h> from pgalloc.h
// #include <linux/sched.h> from pgalloc.h
// #include <asm/page.h>
#include <asm/btfixup.h>
/*
* Fine grained cache flushing.
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
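/* Note (illustration, not original text): both helpers flush the user
 * mapping first so the memcpy() is not satisfied from stale,
 * virtually-indexed cache lines belonging to the user address.
 */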
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
extern void sparc_flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#endif /* _SPARC_CACHEFLUSH_H */
#ifndef ___ASM_SPARC_CACHEFLUSH_H
#define ___ASM_SPARC_CACHEFLUSH_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/cacheflush_64.h>
#else
#include <asm-sparc/cacheflush_32.h>
#endif
#endif
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <linux/mm.h> /* Common for other includes */
// #include <linux/kernel.h> from pgalloc.h
// #include <linux/sched.h> from pgalloc.h
// #include <asm/page.h>
#include <asm/btfixup.h>
/*
* Fine grained cache flushing.
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
extern void sparc_flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#endif /* _SPARC_CACHEFLUSH_H */
#ifndef _SPARC64_CACHEFLUSH_H
#define _SPARC64_CACHEFLUSH_H
#include <asm/page.h>
#ifndef __ASSEMBLY__
#include <linux/mm.h>
/* Cache flush operations. */
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_range(vma, start, end) \
flush_cache_mm((vma)->vm_mm)
#define flush_cache_page(vma, page, pfn) \
flush_cache_mm((vma)->vm_mm)
/*
* On spitfire, the icache doesn't snoop local stores and we don't
* use block commit stores (which invalidate icache lines) during
* module load, so we need this.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_icache_page(unsigned long);
extern void __flush_dcache_page(void *addr, int flush_icache);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif
extern void __flush_dcache_range(unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
unsigned long uaddr, void *kaddr,
unsigned long len, int write);
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_ptrace_access(vma, page, vaddr, src, len, 0); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
} while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#ifdef CONFIG_DEBUG_PAGEALLOC
/* internal debugging function */
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _SPARC64_CACHEFLUSH_H */
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err)
{
register unsigned long ret asm("o0") = (unsigned long)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err)
{
if (!access_ok (VERIFY_WRITE, dst, len)) {
*err = -EFAULT;
return sum;
} else {
register unsigned long ret asm("o0") = (unsigned long)src;
register char __user *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
}
#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__sum16 sum;
/* Note: We must read %2 before we touch %0 for the first time,
* because GCC can legitimately use the same register for
* both operands.
*/
__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
"ld\t[%1 + 0x00], %0\n\t"
"ld\t[%1 + 0x04], %%g2\n\t"
"ld\t[%1 + 0x08], %%g3\n\t"
"addcc\t%%g2, %0, %0\n\t"
"addxcc\t%%g3, %0, %0\n\t"
"ld\t[%1 + 0x0c], %%g2\n\t"
"ld\t[%1 + 0x10], %%g3\n\t"
"addxcc\t%%g2, %0, %0\n\t"
"addx\t%0, %%g0, %0\n"
"1:\taddcc\t%%g3, %0, %0\n\t"
"add\t%1, 4, %1\n\t"
"addxcc\t%0, %%g0, %0\n\t"
"subcc\t%%g4, 1, %%g4\n\t"
"be,a\t2f\n\t"
"sll\t%0, 16, %%g2\n\t"
"b\t1b\n\t"
"ld\t[%1 + 0x10], %%g3\n"
"2:\taddcc\t%0, %%g2, %%g2\n\t"
"srl\t%%g2, 16, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
"xnor\t%%g0, %0, %0"
: "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
: "g2", "g3", "g4", "cc", "memory");
return sum;
}
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
"srl\t%1, 16, %1\n\t"
"addx\t%1, %%g0, %1\n\t"
"xnor\t%%g0, %1, %0"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
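/* Worked example (illustration only): for sum = 0x00012345 the code
 * computes 0x00012345 + 0x23450000 = 0x23462345 with no carry, keeps
 * the high half 0x2346 (== 0x2345 + 0x0001), and complements it,
 * yielding the 16-bit result 0xdcb9.
 */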
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
"addxcc\t%2, %0, %0\n\t"
"addxcc\t%3, %0, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum),
"1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
"addcc %3, %4, %%g4\n\t"
"addxcc %5, %%g4, %%g4\n\t"
"ld [%2 + 0x0c], %%g2\n\t"
"ld [%2 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%2 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%2 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x0c], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"addxcc %%g3, %%g4, %0\n\t"
"addx 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g4", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC_CHECKSUM_H) */
#ifndef ___ASM_SPARC_CHECKSUM_H
#define ___ASM_SPARC_CHECKSUM_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/checksum_64.h>
#else
#include <asm-sparc/checksum_32.h>
#endif
#endif
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err)
{
register unsigned long ret asm("o0") = (unsigned long)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err)
{
if (!access_ok (VERIFY_WRITE, dst, len)) {
*err = -EFAULT;
return sum;
} else {
register unsigned long ret asm("o0") = (unsigned long)src;
register char __user *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
}
#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__sum16 sum;
/* Note: We must read %2 before we touch %0 for the first time,
* because GCC can legitimately use the same register for
* both operands.
*/
__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
"ld\t[%1 + 0x00], %0\n\t"
"ld\t[%1 + 0x04], %%g2\n\t"
"ld\t[%1 + 0x08], %%g3\n\t"
"addcc\t%%g2, %0, %0\n\t"
"addxcc\t%%g3, %0, %0\n\t"
"ld\t[%1 + 0x0c], %%g2\n\t"
"ld\t[%1 + 0x10], %%g3\n\t"
"addxcc\t%%g2, %0, %0\n\t"
"addx\t%0, %%g0, %0\n"
"1:\taddcc\t%%g3, %0, %0\n\t"
"add\t%1, 4, %1\n\t"
"addxcc\t%0, %%g0, %0\n\t"
"subcc\t%%g4, 1, %%g4\n\t"
"be,a\t2f\n\t"
"sll\t%0, 16, %%g2\n\t"
"b\t1b\n\t"
"ld\t[%1 + 0x10], %%g3\n"
"2:\taddcc\t%0, %%g2, %%g2\n\t"
"srl\t%%g2, 16, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
"xnor\t%%g0, %0, %0"
: "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
: "g2", "g3", "g4", "cc", "memory");
return sum;
}
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
"srl\t%1, 16, %1\n\t"
"addx\t%1, %%g0, %1\n\t"
"xnor\t%%g0, %1, %0"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
"addxcc\t%2, %0, %0\n\t"
"addxcc\t%3, %0, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum),
"1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
"addcc %3, %4, %%g4\n\t"
"addxcc %5, %%g4, %%g4\n\t"
"ld [%2 + 0x0c], %%g2\n\t"
"ld [%2 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%2 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%2 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x0c], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"addxcc %%g3, %%g4, %0\n\t"
"addx 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g4", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC_CHECKSUM_H) */
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the V9.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void * buff, int len, __wsum sum);
/* the same as csum_partial, but copies from user space while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
extern long __csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum);
static inline __wsum
csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_from_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
extern long __csum_partial_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum);
static inline __wsum
csum_and_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_to_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
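/* Usage sketch (illustration only, buffer names hypothetical):
 *
 *	int err = 0;
 *	__wsum csum = csum_and_copy_to_user(kbuf, ubuf, len, 0, &err);
 *	if (err)
 *		return -EFAULT;
 */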
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__(
" addcc %0, %1, %1\n"
" srl %1, 16, %1\n"
" addc %1, %%g0, %1\n"
" xnor %%g0, %1, %0\n"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned int len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__(
" addcc %1, %0, %0\n"
" addccc %2, %0, %0\n"
" addccc %3, %0, %0\n"
" addc %0, %%g0, %0\n"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
" addcc %3, %4, %%g7\n"
" addccc %5, %%g7, %%g7\n"
" lduw [%2 + 0x0c], %%g2\n"
" lduw [%2 + 0x08], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%2 + 0x04], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%2 + 0x00], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%1 + 0x0c], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%1 + 0x08], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%1 + 0x04], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%1 + 0x00], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" addccc %%g3, %%g7, %0\n"
" addc 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr), "r"(htonl(len)),
"r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g7", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC64_CHECKSUM_H) */
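Both csum_fold() implementations above perform the same end-around-carry fold in inline assembly. As a portable model (a restatement for clarity, not kernel code), the operation amounts to:

#include <stdint.h>

/* Model of csum_fold(): fold carries out of the high half until the
 * sum fits in 16 bits, then return the one's complement.
 */
static uint16_t csum_fold_model(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* may carry once more */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}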
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
*
* Based on include/asm-sparc64/cpudata.h and Linux 2.4 smp.h
* both (C) David S. Miller.
*/
#ifndef _SPARC_CPUDATA_H
#define _SPARC_CPUDATA_H
#include <linux/percpu.h>
typedef struct {
unsigned long udelay_val;
unsigned long clock_tick;
unsigned int multiplier;
unsigned int counter;
int prom_node;
int mid;
int next;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#endif /* _SPARC_CPUDATA_H */
#ifndef ___ASM_SPARC_CPUDATA_H
#define ___ASM_SPARC_CPUDATA_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/cpudata_64.h>
#else
#include <asm-sparc/cpudata_32.h>
#endif
#endif
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
*/
#ifndef _SPARC64_CPUDATA_H
#define _SPARC64_CPUDATA_H
#include <asm/hypervisor.h>
#include <asm/asi.h>
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
#include <linux/threads.h>
typedef struct {
/* Dcache line 1 */
unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
unsigned int __pad0;
unsigned long clock_tick; /* %tick's per second */
unsigned long __pad;
unsigned int __pad1;
unsigned int __pad2;
/* Dcache line 2, rarely used */
unsigned int dcache_size;
unsigned int dcache_line_size;
unsigned int icache_size;
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
int core_id;
int proc_id;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() __get_cpu_var(__cpu_data)
/* Trap handling code needs to get at a few critical values upon
* trap entry and to process TSB misses. These cannot be in the
* per_cpu() area as we really need to lock them into the TLB and
* thus make them part of the main kernel image. As a result we
* try to make this as small as possible.
*
* This is padded out and aligned to 64-bytes to avoid false sharing
* on SMP.
*/
/* If you modify the size of this structure, please update
* TRAP_BLOCK_SZ_SHIFT below.
*/
struct thread_info;
struct trap_per_cpu {
/* D-cache line 1: Basic thread information, cpu and device mondo queues */
struct thread_info *thread;
unsigned long pgd_paddr;
unsigned long cpu_mondo_pa;
unsigned long dev_mondo_pa;
/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
unsigned long resum_mondo_pa;
unsigned long resum_kernel_buf_pa;
unsigned long nonresum_mondo_pa;
unsigned long nonresum_kernel_buf_pa;
/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
struct hv_fault_status fault_info;
/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
unsigned long cpu_mondo_block_pa;
unsigned long cpu_list_pa;
unsigned long tsb_huge;
unsigned long tsb_huge_temp;
/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
unsigned long irq_worklist_pa;
unsigned int cpu_mondo_qmask;
unsigned int dev_mondo_qmask;
unsigned int resum_qmask;
unsigned int nonresum_qmask;
void *hdesc;
} __attribute__((aligned(64)));
extern struct trap_per_cpu trap_block[NR_CPUS];
extern void init_cur_cpu_trap(struct thread_info *);
extern void setup_tba(void);
extern int ncpus_probed;
extern void __init cpu_probe(void);
extern const struct seq_operations cpuinfo_op;
extern unsigned long real_hard_smp_processor_id(void);
struct cpuid_patch_entry {
unsigned int addr;
unsigned int cheetah_safari[4];
unsigned int cheetah_jbus[4];
unsigned int starfire[4];
unsigned int sun4v[4];
};
extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
struct sun4v_1insn_patch_entry {
unsigned int addr;
unsigned int insn;
};
extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
__sun4v_1insn_patch_end;
struct sun4v_2insn_patch_entry {
unsigned int addr;
unsigned int insns[2];
};
extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
__sun4v_2insn_patch_end;
#endif /* !(__ASSEMBLY__) */
#define TRAP_PER_CPU_THREAD 0x00
#define TRAP_PER_CPU_PGD_PADDR 0x08
#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
#define TRAP_PER_CPU_FAULT_INFO 0x40
#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
#define TRAP_PER_CPU_TSB_HUGE 0xd0
#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
#define TRAP_PER_CPU_RESUM_QMASK 0xf0
#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
#define TRAP_BLOCK_SZ_SHIFT 8
#include <asm/scratchpad.h>
#define __GET_CPUID(REG) \
/* Spitfire implementation (default). */ \
661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x1f, REG; \
nop; \
.section .cpuid_patch, "ax"; \
/* Instruction location. */ \
.word 661b; \
/* Cheetah Safari implementation. */ \
ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x3ff, REG; \
nop; \
/* Cheetah JBUS implementation. */ \
ldxa [%g0] ASI_JBUS_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x1f, REG; \
nop; \
/* Starfire implementation. */ \
sethi %hi(0x1fff40000d0 >> 9), REG; \
sllx REG, 9, REG; \
or REG, 0xd0, REG; \
lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
/* sun4v implementation. */ \
mov SCRATCHPAD_CPUID, REG; \
ldxa [REG] ASI_SCRATCHPAD, REG; \
nop; \
nop; \
.previous;
#ifdef CONFIG_SMP
#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
__GET_CPUID(TMP) \
sethi %hi(trap_block), DEST; \
sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
or DEST, %lo(trap_block), DEST; \
add DEST, TMP, DEST; \
/* Clobbers TMP, current address space PGD phys address into DEST. */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
/* Clobbers TMP, loads DEST with current thread info pointer. */
#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
/* Given the current thread info pointer in THR, load the per-cpu
* area base of the current processor into DEST. REG1, REG2, and REG3 are
* clobbered.
*
* You absolutely cannot use DEST as a temporary in this code. The
* reason is that traps can happen during execution, and return from
* trap will load the fully resolved DEST per-cpu base. This can corrupt
* the calculations done by the macro mid-stream.
*/
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
lduh [THR + TI_CPU], REG1; \
sethi %hi(__per_cpu_shift), REG3; \
sethi %hi(__per_cpu_base), REG2; \
ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
ldx [REG2 + %lo(__per_cpu_base)], REG2; \
sllx REG1, REG3, REG3; \
add REG3, REG2, DEST;
#else
#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
sethi %hi(trap_block), DEST; \
or DEST, %lo(trap_block), DEST; \
/* Uniprocessor versions, we know the cpuid is zero. */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
/* No per-cpu areas on uniprocessor, so no need to load DEST. */
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
#endif /* !(CONFIG_SMP) */
#endif /* _SPARC64_CPUDATA_H */
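Since the trap-entry assembly indexes trap_block[] by shifting the cpuid with TRAP_BLOCK_SZ_SHIFT, the structure size and the shift must agree, as the comment above requests. A compile-time assertion along these lines (my sketch, not part of the header) would enforce it:

/* Hypothetical check: struct trap_per_cpu must stay exactly
 * 1 << TRAP_BLOCK_SZ_SHIFT (256) bytes, or the hand-coded offsets
 * and the trap_block[] indexing above break.
 */
static inline void trap_block_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct trap_per_cpu) !=
		     (1 << TRAP_BLOCK_SZ_SHIFT));
}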
/*
* delay.h: Linux delay routines on the Sparc.
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
*/
#ifndef __SPARC_DELAY_H
#define __SPARC_DELAY_H
#include <asm/cpudata.h>
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__("cmp %0, 0\n\t"
"1: bne 1b\n\t"
"subcc %0, 1, %0\n" :
"=&r" (loops) :
"0" (loops) :
"cc");
}
/* This is too messy with inline asm on the Sparc. */
extern void __udelay(unsigned long usecs, unsigned long lpj);
extern void __ndelay(unsigned long nsecs, unsigned long lpj);
#ifdef CONFIG_SMP
#define __udelay_val cpu_data(smp_processor_id()).udelay_val
#else /* SMP */
#define __udelay_val loops_per_jiffy
#endif /* SMP */
#define udelay(__usecs) __udelay(__usecs, __udelay_val)
#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
#endif /* defined(__SPARC_DELAY_H) */
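The udelay()/ndelay() macros above scale the request by the calibrated per-cpu udelay_val (or loops_per_jiffy) and then spin in __delay(). In portable terms the busy-wait is simply (a model of the inline assembly, not a replacement for it):

/* C model of the sparc32 __delay() loop above. */
static void delay_model(unsigned long loops)
{
	while (loops--)
		__asm__ __volatile__("" ::: "memory");	/* defeat optimizer */
}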
#ifndef ___ASM_SPARC_DELAY_H
#define ___ASM_SPARC_DELAY_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/delay_64.h>
#else
#include <asm-sparc/delay_32.h>
#endif
#endif
/* delay.h: Linux delay routines on sparc64.
*
* Copyright (C) 1996, 2004, 2007 David S. Miller (davem@davemloft.net).
*/
#ifndef _SPARC64_DELAY_H
#define _SPARC64_DELAY_H
#ifndef __ASSEMBLY__
extern void __delay(unsigned long loops);
extern void udelay(unsigned long usecs);
#define mdelay(n) udelay((n) * 1000)
#endif /* !__ASSEMBLY__ */
#endif /* _SPARC64_DELAY_H */
#ifndef _ASM_SPARC_DMA_MAPPING_H
#define _ASM_SPARC_DMA_MAPPING_H
#ifdef CONFIG_PCI
#include <asm-generic/dma-mapping.h>
#else
#include <asm-generic/dma-mapping-broken.h>
#endif /* PCI */
#endif /* _ASM_SPARC_DMA_MAPPING_H */
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/dma-mapping_64.h>
#else
#include <asm-sparc/dma-mapping_32.h>
#endif
#endif
#ifndef _ASM_SPARC64_DMA_MAPPING_H
#define _ASM_SPARC64_DMA_MAPPING_H
#include <linux/scatterlist.h>
#include <linux/mm.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
struct dma_ops {
void *(*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void (*free_coherent)(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
size_t size,
enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction);
int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
int nhwentries,
enum dma_data_direction direction);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction);
void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
int nelems,
enum dma_data_direction direction);
};
extern const struct dma_ops *dma_ops;
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
size_t size,
enum dma_data_direction direction)
{
return dma_ops->map_single(dev, cpu_addr, size, direction);
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction)
{
dma_ops->unmap_single(dev, dma_addr, size, direction);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
return dma_ops->map_single(dev, page_address(page) + offset,
size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction)
{
dma_ops->unmap_single(dev, dma_address, size, direction);
}
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
return dma_ops->map_sg(dev, sg, nents, direction);
}
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
dma_ops->unmap_sg(dev, sg, nents, direction);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction)
{
/* No flushing needed to sync cpu writes to the device. */
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
/* No flushing needed to sync cpu writes to the device. */
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
/* No flushing needed to sync cpu writes to the device. */
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
return (dma_addr == DMA_ERROR_CODE);
}
static inline int dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */
return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
#endif /* _ASM_SPARC64_DMA_MAPPING_H */
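Every wrapper above simply dispatches through the dma_ops vector. A hedged driver-side sketch of the usual map/check/unmap cycle (function name invented, device setup elided); note that dma_mapping_error() here takes only the handle, matching the declaration above:

/* Illustrative only: stream a kernel buffer to a device using the
 * dma_ops-backed API declared above.
 */
static int example_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(handle))
		return -ENOMEM;
	/* ... program the device with "handle" and run the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}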
/*
* include/asm-sparc64/dma.h
*
* Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _ASM_SPARC64_DMA_H
#define _ASM_SPARC64_DMA_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/sbus.h>
#include <asm/delay.h>
#include <asm/oplib.h>
/* These are irrelevant for Sparc DMA, but we leave it in so that
* things can compile.
*/
#define MAX_DMA_CHANNELS 8
#define DMA_MODE_READ 1
#define DMA_MODE_WRITE 2
#define MAX_DMA_ADDRESS (~0UL)
/* Useful constants */
#define SIZE_16MB (16*1024*1024)
#define SIZE_64K (64*1024)
/* SBUS DMA controller reg offsets */
#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
/* DVMA chip revisions */
enum dvma_rev {
dvmarev0,
dvmaesc1,
dvmarev1,
dvmarev2,
dvmarev3,
dvmarevplus,
dvmahme
};
#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
/* Linux DMA information structure, filled during probe. */
struct sbus_dma {
struct sbus_dma *next;
struct sbus_dev *sdev;
void __iomem *regs;
/* Status, misc info */
int node; /* Prom node for this DMA device */
int running; /* Are we doing DMA now? */
int allocated; /* Are we "owned" by anyone yet? */
/* Transfer information. */
u32 addr; /* Start address of current transfer */
int nbytes; /* Size of current transfer */
int realbytes; /* For splitting up large transfers, etc. */
/* DMA revision */
enum dvma_rev revision;
};
extern struct sbus_dma *dma_chain;
/* Broken hardware... */
#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
/* Main routines in dma.c */
extern void dvma_init(struct sbus_bus *);
/* Fields in the cond_reg register */
/* First, the version identification bits */
#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
#define DMA_VERS0 0x00000000 /* Sunray DMA version */
#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
#define DMA_VERS1 0x80000000 /* DMA rev 1 */
#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
#define DMA_BRST64 0x000c0000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
#define DMA_BRST32 0x00040000 /* SCSI: 32byte bursts */
#define DMA_BRST16 0x00000000 /* SCSI: 16byte bursts */
#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
/* Values describing the burst-size property from the PROM */
#define DMA_BURST1 0x01
#define DMA_BURST2 0x02
#define DMA_BURST4 0x04
#define DMA_BURST8 0x08
#define DMA_BURST16 0x10
#define DMA_BURST32 0x20
#define DMA_BURST64 0x40
#define DMA_BURSTBITS 0x7f
/* Determine highest possible final transfer address given a base */
#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
/* Yes, I hack a lot of elisp in my spare time... */
#define DMA_ERROR_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
#define DMA_IRQ_P(regs) ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
#define DMA_WRITE_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
#define DMA_OFF(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp &= ~DMA_ENABLE; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_INTSOFF(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp &= ~DMA_INT_ENAB; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_INTSON(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp |= DMA_INT_ENAB; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_PUNTFIFO(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp |= DMA_FIFO_INV; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_SETSTART(__regs, __addr) \
sbus_writel((u32)(__addr), (__regs) + DMA_ADDR);
#define DMA_BEGINDMA_W(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB); \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_BEGINDMA_R(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp |= (DMA_ENABLE|DMA_INT_ENAB); \
tmp &= ~DMA_ST_WRITE; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
/* For certain DMA chips, we need to disable ints upon irq entry
* and turn them back on when we are done. So in any ESP interrupt
* handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
* when leaving the handler. You have been warned...
*/
#define DMA_IRQ_ENTRY(dma, dregs) do { \
if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
} while (0)
#define DMA_IRQ_EXIT(dma, dregs) do { \
if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
} while(0)
#define for_each_dvma(dma) \
for((dma) = dma_chain; (dma); (dma) = (dma)->next)
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* !(_ASM_SPARC64_DMA_H) */
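The warning above is a hard protocol: on broken DMA revisions the interrupt handler must bracket its work with DMA_IRQ_ENTRY/DMA_IRQ_EXIT. A skeleton honouring that contract (handler body and names are illustrative, not from this commit):

/* Hypothetical ESP interrupt handler obeying the DMA_IRQ_ENTRY /
 * DMA_IRQ_EXIT bracketing described above.
 */
static irqreturn_t example_esp_intr(int irq, void *dev_id)
{
	struct sbus_dma *dma = dev_id;
	void __iomem *dregs = dma->regs;

	DMA_IRQ_ENTRY(dma, dregs);	/* masks ints on dvmarev1 parts */
	if (DMA_IRQ_P(dregs)) {
		/* ... service the DMA/SCSI event here ... */
	}
	DMA_IRQ_EXIT(dma, dregs);	/* unmask again if we masked */
	return IRQ_HANDLED;
}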
/*
* ebus.h: PCI to Ebus pseudo driver software state.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
*
* Adopted for sparc by V. Roganov and G. Raiko.
*/
#ifndef __SPARC_EBUS_H
#define __SPARC_EBUS_H
#ifndef _LINUX_IOPORT_H
#include <linux/ioport.h>
#endif
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/of_device.h>
struct linux_ebus_child {
struct linux_ebus_child *next;
struct linux_ebus_device *parent;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
struct linux_ebus_device {
struct of_device ofdev;
struct linux_ebus_device *next;
struct linux_ebus_child *children;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
struct linux_ebus {
struct of_device ofdev;
struct linux_ebus *next;
struct linux_ebus_device *devices;
struct linux_pbm_info *parent;
struct pci_dev *self;
struct device_node *prom_node;
};
#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
struct linux_ebus_dma {
unsigned int dcsr;
unsigned int dacr;
unsigned int dbcr;
};
#define EBUS_DCSR_INT_PEND 0x00000001
#define EBUS_DCSR_ERR_PEND 0x00000002
#define EBUS_DCSR_DRAIN 0x00000004
#define EBUS_DCSR_INT_EN 0x00000010
#define EBUS_DCSR_RESET 0x00000080
#define EBUS_DCSR_WRITE 0x00000100
#define EBUS_DCSR_EN_DMA 0x00000200
#define EBUS_DCSR_CYC_PEND 0x00000400
#define EBUS_DCSR_DIAG_RD_DONE 0x00000800
#define EBUS_DCSR_DIAG_WR_DONE 0x00001000
#define EBUS_DCSR_EN_CNT 0x00002000
#define EBUS_DCSR_TC 0x00004000
#define EBUS_DCSR_DIS_CSR_DRN 0x00010000
#define EBUS_DCSR_BURST_SZ_MASK 0x000c0000
#define EBUS_DCSR_BURST_SZ_1 0x00080000
#define EBUS_DCSR_BURST_SZ_4 0x00000000
#define EBUS_DCSR_BURST_SZ_8 0x00040000
#define EBUS_DCSR_BURST_SZ_16 0x000c0000
#define EBUS_DCSR_DIAG_EN 0x00100000
#define EBUS_DCSR_DIS_ERR_PEND 0x00400000
#define EBUS_DCSR_TCI_DIS 0x00800000
#define EBUS_DCSR_EN_NEXT 0x01000000
#define EBUS_DCSR_DMA_ON 0x02000000
#define EBUS_DCSR_A_LOADED 0x04000000
#define EBUS_DCSR_NA_LOADED 0x08000000
#define EBUS_DCSR_DEV_ID_MASK 0xf0000000
extern struct linux_ebus *ebus_chain;
extern void ebus_init(void);
#define for_each_ebus(bus) \
for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
#define for_each_ebusdev(dev, bus) \
for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
#define for_each_edevchild(dev, child) \
for((child) = (dev)->children; (child); (child) = (child)->next)
#endif /* !(__SPARC_EBUS_H) */
#ifndef ___ASM_SPARC_EBUS_H
#define ___ASM_SPARC_EBUS_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/ebus_64.h>
#else
#include <asm-sparc/ebus_32.h>
#endif
#endif
/*
* ebus.h: PCI to Ebus pseudo driver software state.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#ifndef __SPARC64_EBUS_H
#define __SPARC64_EBUS_H
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/of_device.h>
struct linux_ebus_child {
struct linux_ebus_child *next;
struct linux_ebus_device *parent;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
struct linux_ebus_device {
struct of_device ofdev;
struct linux_ebus_device *next;
struct linux_ebus_child *children;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
struct linux_ebus {
struct of_device ofdev;
struct linux_ebus *next;
struct linux_ebus_device *devices;
struct pci_dev *self;
int index;
int is_rio;
struct device_node *prom_node;
};
#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
struct ebus_dma_info {
spinlock_t lock;
void __iomem *regs;
unsigned int flags;
#define EBUS_DMA_FLAG_USE_EBDMA_HANDLER 0x00000001
#define EBUS_DMA_FLAG_TCI_DISABLE 0x00000002
/* These are only valid if EBUS_DMA_FLAG_USE_EBDMA_HANDLER is
* set.
*/
void (*callback)(struct ebus_dma_info *p, int event, void *cookie);
void *client_cookie;
unsigned int irq;
#define EBUS_DMA_EVENT_ERROR 1
#define EBUS_DMA_EVENT_DMA 2
#define EBUS_DMA_EVENT_DEVICE 4
unsigned char name[64];
};
extern int ebus_dma_register(struct ebus_dma_info *p);
extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
extern void ebus_dma_unregister(struct ebus_dma_info *p);
extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
size_t len);
extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
extern struct linux_ebus *ebus_chain;
extern void ebus_init(void);
#define for_each_ebus(bus) \
for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
#define for_each_ebusdev(dev, bus) \
for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
#define for_each_edevchild(dev, child) \
for((child) = (dev)->children; (child); (child) = (child)->next)
#endif /* !(__SPARC64_EBUS_H) */
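The for_each_ebus()/for_each_ebusdev()/for_each_edevchild() macros walk the singly linked chains built at probe time. A small sketch (the scan logic is invented for illustration):

/* Illustrative only: visit every EBus device and child node. */
static void example_ebus_scan(void)
{
	struct linux_ebus *ebus;
	struct linux_ebus_device *edev;
	struct linux_ebus_child *child;

	for_each_ebus(ebus) {
		for_each_ebusdev(edev, ebus) {
			/* edev->prom_node names the OBP node; its
			 * resource[] and irqs[] are already translated.
			 */
			for_each_edevchild(edev, child) {
				/* nested devices, e.g. serial ports */
			}
		}
	}
}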
#ifndef __ASMSPARC_ELF_H
#define __ASMSPARC_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
/*
* Sparc section types
*/
#define STT_REGISTER 13
/*
* Sparc ELF relocation types
*/
#define R_SPARC_NONE 0
#define R_SPARC_8 1
#define R_SPARC_16 2
#define R_SPARC_32 3
#define R_SPARC_DISP8 4
#define R_SPARC_DISP16 5
#define R_SPARC_DISP32 6
#define R_SPARC_WDISP30 7
#define R_SPARC_WDISP22 8
#define R_SPARC_HI22 9
#define R_SPARC_22 10
#define R_SPARC_13 11
#define R_SPARC_LO10 12
#define R_SPARC_GOT10 13
#define R_SPARC_GOT13 14
#define R_SPARC_GOT22 15
#define R_SPARC_PC10 16
#define R_SPARC_PC22 17
#define R_SPARC_WPLT30 18
#define R_SPARC_COPY 19
#define R_SPARC_GLOB_DAT 20
#define R_SPARC_JMP_SLOT 21
#define R_SPARC_RELATIVE 22
#define R_SPARC_UA32 23
#define R_SPARC_PLT32 24
#define R_SPARC_HIPLT22 25
#define R_SPARC_LOPLT10 26
#define R_SPARC_PCPLT32 27
#define R_SPARC_PCPLT22 28
#define R_SPARC_PCPLT10 29
#define R_SPARC_10 30
#define R_SPARC_11 31
#define R_SPARC_64 32
#define R_SPARC_OLO10 33
#define R_SPARC_WDISP16 40
#define R_SPARC_WDISP19 41
#define R_SPARC_7 43
#define R_SPARC_5 44
#define R_SPARC_6 45
/* Bits present in AT_HWCAP, primarily for Sparc32. */
#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
#define HWCAP_SPARC_STBAR 2
#define HWCAP_SPARC_SWAP 4
#define HWCAP_SPARC_MULDIV 8
#define HWCAP_SPARC_V9 16
#define HWCAP_SPARC_ULTRA3 32
#define CORE_DUMP_USE_REGSET
/* Format is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* PSR, PC, nPC, Y, WIM, TBR
*/
typedef unsigned long elf_greg_t;
#define ELF_NGREG 38
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct {
union {
unsigned long pr_regs[32];
double pr_dregs[16];
} pr_fr;
unsigned long __unused;
unsigned long pr_fsr;
unsigned char pr_qcnt;
unsigned char pr_q_entrysize;
unsigned char pr_en;
unsigned int pr_q[64];
} elf_fpregset_t;
#include <asm/mbus.h>
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_SPARC)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_ARCH EM_SPARC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN4
#define ELF_EXEC_PAGESIZE 4096
#else
#define ELF_EXEC_PAGESIZE 8192
#endif
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This can NOT be done in userspace
on Sparc. */
/* Sun4c has none of the capabilities, most sun4m's have them all.
* XXX This is gross, set some global variable at boot time. -DaveM
*/
#define ELF_HWCAP ((ARCH_SUN4C_SUN4) ? 0 : \
(HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
HWCAP_SPARC_SWAP | \
((srmmu_modtype != Cypress && \
srmmu_modtype != Cypress_vE && \
srmmu_modtype != Cypress_vD) ? \
HWCAP_SPARC_MULDIV : 0)))
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
#endif /* !(__ASMSPARC_ELF_H) */
#ifndef ___ASM_SPARC_ELF_H
#define ___ASM_SPARC_ELF_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/elf_64.h>
#else
#include <asm-sparc/elf_32.h>
#endif
#endif
#ifndef __ASM_SPARC64_ELF_H
#define __ASM_SPARC64_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/spitfire.h>
/*
* Sparc section types
*/
#define STT_REGISTER 13
/*
* Sparc ELF relocation types
*/
#define R_SPARC_NONE 0
#define R_SPARC_8 1
#define R_SPARC_16 2
#define R_SPARC_32 3
#define R_SPARC_DISP8 4
#define R_SPARC_DISP16 5
#define R_SPARC_DISP32 6
#define R_SPARC_WDISP30 7
#define R_SPARC_WDISP22 8
#define R_SPARC_HI22 9
#define R_SPARC_22 10
#define R_SPARC_13 11
#define R_SPARC_LO10 12
#define R_SPARC_GOT10 13
#define R_SPARC_GOT13 14
#define R_SPARC_GOT22 15
#define R_SPARC_PC10 16
#define R_SPARC_PC22 17
#define R_SPARC_WPLT30 18
#define R_SPARC_COPY 19
#define R_SPARC_GLOB_DAT 20
#define R_SPARC_JMP_SLOT 21
#define R_SPARC_RELATIVE 22
#define R_SPARC_UA32 23
#define R_SPARC_PLT32 24
#define R_SPARC_HIPLT22 25
#define R_SPARC_LOPLT10 26
#define R_SPARC_PCPLT32 27
#define R_SPARC_PCPLT22 28
#define R_SPARC_PCPLT10 29
#define R_SPARC_10 30
#define R_SPARC_11 31
#define R_SPARC_64 32
#define R_SPARC_OLO10 33
#define R_SPARC_WDISP16 40
#define R_SPARC_WDISP19 41
#define R_SPARC_7 43
#define R_SPARC_5 44
#define R_SPARC_6 45
/* Bits present in AT_HWCAP, primarily for Sparc32. */
#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
#define HWCAP_SPARC_STBAR 2
#define HWCAP_SPARC_SWAP 4
#define HWCAP_SPARC_MULDIV 8
#define HWCAP_SPARC_V9 16
#define HWCAP_SPARC_ULTRA3 32
#define HWCAP_SPARC_BLKINIT 64
#define HWCAP_SPARC_N2 128
#define CORE_DUMP_USE_REGSET
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_ARCH EM_SPARCV9
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
/* Format of 64-bit elf_gregset_t is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* TSTATE
* TPC
* TNPC
* Y
*/
typedef unsigned long elf_greg_t;
#define ELF_NGREG 36
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct {
unsigned long pr_regs[32];
unsigned long pr_fsr;
unsigned long pr_gsr;
unsigned long pr_fprs;
} elf_fpregset_t;
/* Format of 32-bit elf_gregset_t is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* PSR, PC, nPC, Y, WIM, TBR
*/
typedef unsigned int compat_elf_greg_t;
#define COMPAT_ELF_NGREG 38
typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
typedef struct {
union {
unsigned int pr_regs[32];
unsigned long pr_dregs[16];
} pr_fr;
unsigned int __unused;
unsigned int pr_fsr;
unsigned char pr_qcnt;
unsigned char pr_q_entrysize;
unsigned char pr_en;
unsigned int pr_q[64];
} compat_elf_fpregset_t;
/* UltraSparc extensions. Still unused, but will be eventually. */
typedef struct {
unsigned int pr_type;
unsigned int pr_align;
union {
struct {
union {
unsigned int pr_regs[32];
unsigned long pr_dregs[16];
long double pr_qregs[8];
} pr_xfr;
} pr_v8p;
unsigned int pr_xfsr;
unsigned int pr_fprs;
unsigned int pr_xg[8];
unsigned int pr_xo[8];
unsigned long pr_tstate;
unsigned int pr_filler[8];
} pr_un;
} elf_xregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x) ((x)->e_machine == EM_SPARC || \
(x)->e_machine == EM_SPARC32PLUS)
#define compat_start_thread start_thread32
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE 0x0000010000000000UL
#define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. */
/* On Ultra, we support all of the v8 capabilities. */
static inline unsigned int sparc64_elf_hwcap(void)
{
unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
HWCAP_SPARC_V9);
if (tlb_type == cheetah || tlb_type == cheetah_plus)
cap |= HWCAP_SPARC_ULTRA3;
else if (tlb_type == hypervisor) {
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA2)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2)
cap |= HWCAP_SPARC_N2;
}
return cap;
}
#define ELF_HWCAP sparc64_elf_hwcap()
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex, ibcs2) \
do { unsigned long new_flags = current_thread_info()->flags; \
new_flags &= _TIF_32BIT; \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
new_flags |= _TIF_32BIT; \
else \
new_flags &= ~_TIF_32BIT; \
if ((current_thread_info()->flags & _TIF_32BIT) \
!= new_flags) \
set_thread_flag(TIF_ABI_PENDING); \
else \
clear_thread_flag(TIF_ABI_PENDING); \
/* flush_thread will update pgd cache */ \
if (ibcs2) \
set_personality(PER_SVR4); \
else if (current->personality != PER_LINUX32) \
set_personality(PER_LINUX); \
} while (0)
#endif /* !(__ASM_SPARC64_ELF_H) */
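The sparc64_elf_hwcap() result reaches userspace through the AT_HWCAP auxiliary-vector entry. A userspace check of those bits might look like this (getauxval() is a later glibc addition, so treat this as an illustration rather than contemporary code):

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), glibc >= 2.16 */

#define HWCAP_SPARC_V9		16	/* values mirror the header above */
#define HWCAP_SPARC_BLKINIT	64

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_SPARC_V9)
		puts("V9 instruction set present");
	if (hwcap & HWCAP_SPARC_BLKINIT)
		puts("Niagara block-init stores present");
	return 0;
}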
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <asm-generic/futex.h>
#endif
#ifndef ___ASM_SPARC_FUTEX_H
#define ___ASM_SPARC_FUTEX_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/futex_64.h>
#else
#include <asm-sparc/futex_32.h>
#endif
#endif
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <asm-generic/futex.h>
#endif