Commit 916448aa authored by Andrew Morton, committed by Linus Torvalds

[PATCH] M68k extern inline

From: Geert Uytterhoeven <geert@linux-m68k.org>

M68k core: Replace (variants of) `extern inline' by `static inline'
parent e8086cbd
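
For readers unfamiliar with the distinction the commit message relies on: under the GNU C89 (gnu89) semantics gcc used at the time, `extern inline' marks an inline-only definition. No out-of-line body is emitted, so any call the compiler declines to inline (for instance at -O0, or when the function's address is taken) resolves to an external symbol that must be defined in some other object file, which is easy to get wrong for functions defined in headers. `static inline' instead lets each translation unit emit its own internal-linkage copy when needed, and it sidesteps the later C99 reversal of what `extern inline' means. The sketch below is illustrative only; the header guard and function names are hypothetical and not taken from this patch.

/* Illustrative sketch, not part of this patch: hypothetical header example_inline.h */
#ifndef EXAMPLE_INLINE_H
#define EXAMPLE_INLINE_H

/* gnu89 `extern inline': the body is used only for inlining.  If gcc does
   not inline a call site (e.g. when building with -O0, or when the address
   of old_style is taken), the call references an external symbol
   `old_style' that no object file defines, and the link fails with an
   undefined reference unless an out-of-line definition is provided
   somewhere else. */
extern inline int old_style(int x) { return x + 1; }

/* `static inline': every .c file that includes this header may get its own
   internal-linkage copy when an out-of-line body is needed, so the header
   can be included from any number of translation units without undefined
   or multiply-defined symbols. */
static inline int new_style(int x) { return x + 1; }

#endif /* EXAMPLE_INLINE_H */
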
......@@ -447,7 +447,7 @@ extern int mmu_emu_handle_fault (unsigned long, int, int);
/* sun3 version of bus_error030 */
extern inline void bus_error030 (struct frame *fp)
static inline void bus_error030 (struct frame *fp)
{
unsigned char buserr_type = sun3_get_buserr ();
unsigned long addr, errorcode;
......
......@@ -38,17 +38,14 @@ enum {
/* Convenience functions to stuff various integer values into int128s */
extern inline void zero128(int128 a)
static inline void zero128(int128 a)
{
a[LSW128] = a[NLSW128] = a[NMSW128] = a[MSW128] = 0;
}
/* Human-readable word order in the arguments */
extern inline void set128(unsigned int i3,
unsigned int i2,
unsigned int i1,
unsigned int i0,
int128 a)
static inline void set128(unsigned int i3, unsigned int i2, unsigned int i1,
unsigned int i0, int128 a)
{
a[LSW128] = i0;
a[NLSW128] = i1;
......@@ -57,21 +54,19 @@ extern inline void set128(unsigned int i3,
}
/* Convenience functions (for testing as well) */
extern inline void int64_to_128(unsigned long long src,
int128 dest)
static inline void int64_to_128(unsigned long long src, int128 dest)
{
dest[LSW128] = (unsigned int) src;
dest[NLSW128] = src >> 32;
dest[NMSW128] = dest[MSW128] = 0;
}
extern inline void int128_to_64(const int128 src,
unsigned long long *dest)
static inline void int128_to_64(const int128 src, unsigned long long *dest)
{
*dest = src[LSW128] | (long long) src[NLSW128] << 32;
}
extern inline void put_i128(const int128 a)
static inline void put_i128(const int128 a)
{
printk("%08x %08x %08x %08x\n", a[MSW128], a[NMSW128],
a[NLSW128], a[LSW128]);
......@@ -82,7 +77,7 @@ extern inline void put_i128(const int128 a)
Note that these are only good for 0 < count < 32.
*/
extern inline void _lsl128(unsigned int count, int128 a)
static inline void _lsl128(unsigned int count, int128 a)
{
a[MSW128] = (a[MSW128] << count) | (a[NMSW128] >> (32 - count));
a[NMSW128] = (a[NMSW128] << count) | (a[NLSW128] >> (32 - count));
......@@ -90,7 +85,7 @@ extern inline void _lsl128(unsigned int count, int128 a)
a[LSW128] <<= count;
}
extern inline void _lsr128(unsigned int count, int128 a)
static inline void _lsr128(unsigned int count, int128 a)
{
a[LSW128] = (a[LSW128] >> count) | (a[NLSW128] << (32 - count));
a[NLSW128] = (a[NLSW128] >> count) | (a[NMSW128] << (32 - count));
......@@ -100,7 +95,7 @@ extern inline void _lsr128(unsigned int count, int128 a)
/* Should be faster, one would hope */
extern inline void lslone128(int128 a)
static inline void lslone128(int128 a)
{
asm volatile ("lsl.l #1,%0\n"
"roxl.l #1,%1\n"
......@@ -118,7 +113,7 @@ extern inline void lslone128(int128 a)
"3"(a[MSW128]));
}
extern inline void lsrone128(int128 a)
static inline void lsrone128(int128 a)
{
asm volatile ("lsr.l #1,%0\n"
"roxr.l #1,%1\n"
......@@ -140,7 +135,7 @@ extern inline void lsrone128(int128 a)
These bit-shift to a multiple of 32, then move whole longwords. */
extern inline void lsl128(unsigned int count, int128 a)
static inline void lsl128(unsigned int count, int128 a)
{
int wordcount, i;
......@@ -159,7 +154,7 @@ extern inline void lsl128(unsigned int count, int128 a)
}
}
extern inline void lsr128(unsigned int count, int128 a)
static inline void lsr128(unsigned int count, int128 a)
{
int wordcount, i;
......@@ -177,18 +172,18 @@ extern inline void lsr128(unsigned int count, int128 a)
}
}
extern inline int orl128(int a, int128 b)
static inline int orl128(int a, int128 b)
{
b[LSW128] |= a;
}
extern inline int btsthi128(const int128 a)
static inline int btsthi128(const int128 a)
{
return a[MSW128] & 0x80000000;
}
/* test bits (numbered from 0 = LSB) up to and including "top" */
extern inline int bftestlo128(int top, const int128 a)
static inline int bftestlo128(int top, const int128 a)
{
int r = 0;
......@@ -206,7 +201,7 @@ extern inline int bftestlo128(int top, const int128 a)
/* Aargh. We need these because GCC is broken */
/* FIXME: do them in assembly, for goodness' sake! */
extern inline void mask64(int pos, unsigned long long *mask)
static inline void mask64(int pos, unsigned long long *mask)
{
*mask = 0;
......@@ -218,7 +213,7 @@ extern inline void mask64(int pos, unsigned long long *mask)
HI_WORD(*mask) = (1 << (pos - 32)) - 1;
}
extern inline void bset64(int pos, unsigned long long *dest)
static inline void bset64(int pos, unsigned long long *dest)
{
/* This conditional will be optimized away. Thanks, GCC! */
if (pos < 32)
......@@ -229,7 +224,7 @@ extern inline void bset64(int pos, unsigned long long *dest)
(HI_WORD(*dest)):"id"(pos - 32));
}
extern inline int btst64(int pos, unsigned long long dest)
static inline int btst64(int pos, unsigned long long dest)
{
if (pos < 32)
return (0 != (LO_WORD(dest) & (1 << pos)));
......@@ -237,7 +232,7 @@ extern inline int btst64(int pos, unsigned long long dest)
return (0 != (HI_WORD(dest) & (1 << (pos - 32))));
}
extern inline void lsl64(int count, unsigned long long *dest)
static inline void lsl64(int count, unsigned long long *dest)
{
if (count < 32) {
HI_WORD(*dest) = (HI_WORD(*dest) << count)
......@@ -250,7 +245,7 @@ extern inline void lsl64(int count, unsigned long long *dest)
LO_WORD(*dest) = 0;
}
extern inline void lsr64(int count, unsigned long long *dest)
static inline void lsr64(int count, unsigned long long *dest)
{
if (count < 32) {
LO_WORD(*dest) = (LO_WORD(*dest) >> count)
......@@ -264,7 +259,7 @@ extern inline void lsr64(int count, unsigned long long *dest)
}
#endif
extern inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
static inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
{
reg->exp += cnt;
......@@ -306,7 +301,7 @@ extern inline void fp_denormalize(struct fp_ext *reg, unsigned int cnt)
}
}
extern inline int fp_overnormalize(struct fp_ext *reg)
static inline int fp_overnormalize(struct fp_ext *reg)
{
int shift;
......@@ -324,7 +319,7 @@ extern inline int fp_overnormalize(struct fp_ext *reg)
return shift;
}
extern inline int fp_addmant(struct fp_ext *dest, struct fp_ext *src)
static inline int fp_addmant(struct fp_ext *dest, struct fp_ext *src)
{
int carry;
......@@ -340,7 +335,7 @@ extern inline int fp_addmant(struct fp_ext *dest, struct fp_ext *src)
return carry;
}
extern inline int fp_addcarry(struct fp_ext *reg)
static inline int fp_addcarry(struct fp_ext *reg)
{
if (++reg->exp == 0x7fff) {
if (reg->mant.m64)
......@@ -357,7 +352,8 @@ extern inline int fp_addcarry(struct fp_ext *reg)
return 1;
}
extern inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1, struct fp_ext *src2)
static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
struct fp_ext *src2)
{
/* we assume here, gcc only insert move and a clr instr */
asm volatile ("sub.b %1,%0" : "=d,g" (dest->lowmant)
......@@ -407,7 +403,8 @@ extern inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1, struct f
carry; \
})
extern inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1, struct fp_ext *src2)
static inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1,
struct fp_ext *src2)
{
union fp_mant64 temp;
......@@ -421,7 +418,8 @@ extern inline void fp_multiplymant(union fp_mant128 *dest, struct fp_ext *src1,
fp_addx96(dest, temp);
}
extern inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src, struct fp_ext *div)
static inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src,
struct fp_ext *div)
{
union fp_mant128 tmp;
union fp_mant64 tmp64;
......@@ -484,7 +482,7 @@ extern inline void fp_dividemant(union fp_mant128 *dest, struct fp_ext *src, str
}
#if 0
extern inline unsigned int fp_fls128(union fp_mant128 *src)
static inline unsigned int fp_fls128(union fp_mant128 *src)
{
unsigned long data;
unsigned int res, off;
......@@ -504,7 +502,7 @@ extern inline unsigned int fp_fls128(union fp_mant128 *src)
return res + off;
}
extern inline void fp_shiftmant128(union fp_mant128 *src, int shift)
static inline void fp_shiftmant128(union fp_mant128 *src, int shift)
{
unsigned long sticky;
......@@ -594,7 +592,8 @@ extern inline void fp_shiftmant128(union fp_mant128 *src, int shift)
}
#endif
extern inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src, int shift)
static inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src,
int shift)
{
unsigned long tmp;
......@@ -639,7 +638,7 @@ extern inline void fp_putmant128(struct fp_ext *dest, union fp_mant128 *src, int
}
#if 0 /* old code... */
extern inline int fls(unsigned int a)
static inline int fls(unsigned int a)
{
int r;
......@@ -649,7 +648,7 @@ extern inline int fls(unsigned int a)
}
/* fls = "find last set" (cf. ffs(3)) */
extern inline int fls128(const int128 a)
static inline int fls128(const int128 a)
{
if (a[MSW128])
return fls(a[MSW128]);
......@@ -668,12 +667,12 @@ extern inline int fls128(const int128 a)
return -1;
}
extern inline int zerop128(const int128 a)
static inline int zerop128(const int128 a)
{
return !(a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
}
extern inline int nonzerop128(const int128 a)
static inline int nonzerop128(const int128 a)
{
return (a[LSW128] | a[NLSW128] | a[NMSW128] | a[MSW128]);
}
......@@ -681,7 +680,7 @@ extern inline int nonzerop128(const int128 a)
/* Addition and subtraction */
/* Do these in "pure" assembly, because "extended" asm is unmanageable
here */
extern inline void add128(const int128 a, int128 b)
static inline void add128(const int128 a, int128 b)
{
/* rotating carry flags */
unsigned int carry[2];
......@@ -699,7 +698,7 @@ extern inline void add128(const int128 a, int128 b)
}
/* Note: assembler semantics: "b -= a" */
extern inline void sub128(const int128 a, int128 b)
static inline void sub128(const int128 a, int128 b)
{
/* rotating borrow flags */
unsigned int borrow[2];
......@@ -717,9 +716,7 @@ extern inline void sub128(const int128 a, int128 b)
}
/* Poor man's 64-bit expanding multiply */
extern inline void mul64(unsigned long long a,
unsigned long long b,
int128 c)
static inline void mul64(unsigned long long a, unsigned long long b, int128 c)
{
unsigned long long acc;
int128 acc128;
......@@ -756,7 +753,7 @@ extern inline void mul64(unsigned long long a,
}
/* Note: unsigned */
extern inline int cmp128(int128 a, int128 b)
static inline int cmp128(int128 a, int128 b)
{
if (a[MSW128] < b[MSW128])
return -1;
......
......@@ -180,14 +180,14 @@ static inline void atari_disable_irq( unsigned irq )
* "stored"
*/
extern inline void atari_turnon_irq( unsigned irq )
static inline void atari_turnon_irq( unsigned irq )
{
if (irq < STMFP_SOURCE_BASE || irq >= SCC_SOURCE_BASE) return;
set_mfp_bit( irq, MFP_ENABLE );
}
extern inline void atari_turnoff_irq( unsigned irq )
static inline void atari_turnoff_irq( unsigned irq )
{
if (irq < STMFP_SOURCE_BASE || irq >= SCC_SOURCE_BASE) return;
......@@ -195,14 +195,14 @@ extern inline void atari_turnoff_irq( unsigned irq )
clear_mfp_bit( irq, MFP_PENDING );
}
extern inline void atari_clear_pending_irq( unsigned irq )
static inline void atari_clear_pending_irq( unsigned irq )
{
if (irq < STMFP_SOURCE_BASE || irq >= SCC_SOURCE_BASE) return;
clear_mfp_bit( irq, MFP_PENDING );
}
extern inline int atari_irq_pending( unsigned irq )
static inline int atari_irq_pending( unsigned irq )
{
if (irq < STMFP_SOURCE_BASE || irq >= SCC_SOURCE_BASE) return( 0 );
......
......@@ -21,7 +21,8 @@
__constant_test_and_set_bit(nr, vaddr) : \
__generic_test_and_set_bit(nr, vaddr))
extern __inline__ int __constant_test_and_set_bit(int nr,volatile unsigned long * vaddr)
static inline int __constant_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
......@@ -32,7 +33,8 @@ extern __inline__ int __constant_test_and_set_bit(int nr,volatile unsigned long
return retval;
}
extern __inline__ int __generic_test_and_set_bit(int nr,volatile unsigned long * vaddr)
static inline int __generic_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
......@@ -49,13 +51,13 @@ extern __inline__ int __generic_test_and_set_bit(int nr,volatile unsigned long *
#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
extern __inline__ void __constant_set_bit(int nr, volatile unsigned long * vaddr)
static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bset %1,%0"
: "+m" (((volatile char *)vaddr)[(nr^31) >> 3]) : "di" (nr & 7));
}
extern __inline__ void __generic_set_bit(int nr, volatile unsigned long * vaddr)
static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfset %1@{%0:#1}"
: : "d" (nr^31), "a" (vaddr) : "memory");
......@@ -68,7 +70,8 @@ extern __inline__ void __generic_set_bit(int nr, volatile unsigned long * vaddr)
#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
extern __inline__ int __constant_test_and_clear_bit(int nr, volatile unsigned long * vaddr)
static inline int __constant_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
......@@ -79,7 +82,8 @@ extern __inline__ int __constant_test_and_clear_bit(int nr, volatile unsigned lo
return retval;
}
extern __inline__ int __generic_test_and_clear_bit(int nr, volatile unsigned long * vaddr)
static inline int __generic_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
......@@ -101,13 +105,13 @@ extern __inline__ int __generic_test_and_clear_bit(int nr, volatile unsigned lon
__generic_clear_bit(nr, vaddr))
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
extern __inline__ void __constant_clear_bit(int nr, volatile unsigned long * vaddr)
static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bclr %1,%0"
: "+m" (((volatile char *)vaddr)[(nr^31) >> 3]) : "di" (nr & 7));
}
extern __inline__ void __generic_clear_bit(int nr, volatile unsigned long * vaddr)
static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfclr %1@{%0:#1}"
: : "d" (nr^31), "a" (vaddr) : "memory");
......@@ -121,7 +125,8 @@ extern __inline__ void __generic_clear_bit(int nr, volatile unsigned long * vadd
#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
extern __inline__ int __constant_test_and_change_bit(int nr, volatile unsigned long * vaddr)
static inline int __constant_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
......@@ -132,7 +137,8 @@ extern __inline__ int __constant_test_and_change_bit(int nr, volatile unsigned l
return retval;
}
extern __inline__ int __generic_test_and_change_bit(int nr, volatile unsigned long * vaddr)
static inline int __generic_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
......@@ -147,25 +153,25 @@ extern __inline__ int __generic_test_and_change_bit(int nr, volatile unsigned lo
__constant_change_bit(nr, vaddr) : \
__generic_change_bit(nr, vaddr))
extern __inline__ void __constant_change_bit(int nr, volatile unsigned long * vaddr)
static inline void __constant_change_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bchg %1,%0"
: "+m" (((volatile char *)vaddr)[(nr^31) >> 3]) : "di" (nr & 7));
}
extern __inline__ void __generic_change_bit(int nr, volatile unsigned long * vaddr)
static inline void __generic_change_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfchg %1@{%0:#1}"
: : "d" (nr^31), "a" (vaddr) : "memory");
}
extern __inline__ int test_bit(int nr, const volatile unsigned long * vaddr)
static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
return ((1UL << (nr & 31)) & (((const volatile unsigned long *) vaddr)[nr >> 5])) != 0;
}
extern __inline__ int find_first_zero_bit(const unsigned long *vaddr,
unsigned size)
static inline int find_first_zero_bit(const unsigned long *vaddr,
unsigned size)
{
const unsigned long *p = vaddr, *addr = vaddr;
unsigned long allones = ~0UL;
......@@ -188,8 +194,8 @@ extern __inline__ int find_first_zero_bit(const unsigned long *vaddr,
return ((p - addr) << 5) + (res ^ 31);
}
extern __inline__ int find_next_zero_bit (const unsigned long *vaddr, int size,
int offset)
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
int offset)
{
const unsigned long *addr = vaddr;
const unsigned long *p = addr + (offset >> 5);
......@@ -218,7 +224,7 @@ extern __inline__ int find_next_zero_bit (const unsigned long *vaddr, int size,
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
extern __inline__ unsigned long ffz(unsigned long word)
static inline unsigned long ffz(unsigned long word)
{
int res;
......@@ -289,8 +295,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
/* Bitmap functions for the minix filesystem */
extern __inline__ int
minix_find_first_zero_bit (const void *vaddr, unsigned size)
static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size)
{
const unsigned short *p = vaddr, *addr = vaddr;
int res;
......@@ -312,8 +317,7 @@ minix_find_first_zero_bit (const void *vaddr, unsigned size)
return ((p - addr) << 4) + (res ^ 31);
}
extern __inline__ int
minix_test_and_set_bit (int nr, volatile void *vaddr)
static inline int minix_test_and_set_bit(int nr, volatile void *vaddr)
{
char retval;
......@@ -325,8 +329,7 @@ minix_test_and_set_bit (int nr, volatile void *vaddr)
#define minix_set_bit(nr,addr) ((void)minix_test_and_set_bit(nr,addr))
extern __inline__ int
minix_test_and_clear_bit (int nr, volatile void *vaddr)
static inline int minix_test_and_clear_bit(int nr, volatile void *vaddr)
{
char retval;
......@@ -336,16 +339,14 @@ minix_test_and_clear_bit (int nr, volatile void *vaddr)
return retval;
}
extern __inline__ int
minix_test_bit (int nr, const volatile void *vaddr)
static inline int minix_test_bit(int nr, const volatile void *vaddr)
{
return ((1U << (nr & 15)) & (((const volatile unsigned short *) vaddr)[nr >> 4])) != 0;
}
/* Bitmap functions for the ext2 filesystem. */
extern __inline__ int
ext2_set_bit (int nr, volatile void *vaddr)
static inline int ext2_set_bit(int nr, volatile void *vaddr)
{
char retval;
......@@ -355,8 +356,7 @@ ext2_set_bit (int nr, volatile void *vaddr)
return retval;
}
extern __inline__ int
ext2_clear_bit (int nr, volatile void *vaddr)
static inline int ext2_clear_bit(int nr, volatile void *vaddr)
{
char retval;
......@@ -384,14 +384,12 @@ ext2_clear_bit (int nr, volatile void *vaddr)
ret; \
})
extern __inline__ int
ext2_test_bit (int nr, const volatile void *vaddr)
static inline int ext2_test_bit(int nr, const volatile void *vaddr)
{
return ((1U << (nr & 7)) & (((const volatile unsigned char *) vaddr)[nr >> 3])) != 0;
}
extern __inline__ int
ext2_find_first_zero_bit (const void *vaddr, unsigned size)
static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
{
const unsigned long *p = vaddr, *addr = vaddr;
int res;
......@@ -413,8 +411,8 @@ ext2_find_first_zero_bit (const void *vaddr, unsigned size)
return (p - addr) * 32 + res;
}
extern __inline__ int
ext2_find_next_zero_bit (const void *vaddr, unsigned size, unsigned offset)
static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
unsigned offset)
{
const unsigned long *addr = vaddr;
const unsigned long *p = addr + (offset >> 5);
......
......@@ -83,7 +83,7 @@ extern void cache_push_v(unsigned long vaddr, int len);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
extern inline void flush_cache_mm(struct mm_struct *mm)
static inline void flush_cache_mm(struct mm_struct *mm)
{
if (mm == current->mm)
__flush_cache_030();
......@@ -91,7 +91,7 @@ extern inline void flush_cache_mm(struct mm_struct *mm)
/* flush_cache_range/flush_cache_page must be macros to avoid
a dependency on linux/mm.h, which includes this file... */
extern inline void flush_cache_range(struct vm_area_struct *vma,
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
......@@ -99,7 +99,7 @@ extern inline void flush_cache_range(struct vm_area_struct *vma,
__flush_cache_030();
}
extern inline void flush_cache_page(struct vm_area_struct *vma,
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
if (vma->vm_mm == current->mm)
......@@ -109,7 +109,7 @@ extern inline void flush_cache_page(struct vm_area_struct *vma,
/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
extern inline void __flush_page_to_ram(void *vaddr)
static inline void __flush_page_to_ram(void *vaddr)
{
if (CPU_IS_040_OR_060) {
__asm__ __volatile__("nop\n\t"
......
......@@ -9,7 +9,7 @@
* Delay routines, using a pre-computed "loops_per_jiffy" value.
*/
extern __inline__ void __delay(unsigned long loops)
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
: "=d" (loops) : "0" (loops));
......@@ -43,7 +43,8 @@ static inline void __udelay(unsigned long usecs)
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
__udelay(n))
extern __inline__ unsigned long muldiv(unsigned long a, unsigned long b, unsigned long c)
static inline unsigned long muldiv(unsigned long a, unsigned long b,
unsigned long c)
{
unsigned long tmp;
......
......@@ -58,7 +58,8 @@ extern void dvma_free(void *vaddr);
#define dvma_vtob(x) dvma_vtop(x)
#define dvma_btov(x) dvma_ptov(x)
extern inline int dvma_map_cpu(unsigned long kaddr, unsigned long vaddr, int len)
static inline int dvma_map_cpu(unsigned long kaddr, unsigned long vaddr,
int len)
{
return 0;
}
......
......@@ -285,20 +285,20 @@ static inline void isa_delay(void)
#endif /* CONFIG_PCI */
extern inline void *ioremap(unsigned long physaddr, unsigned long size)
static inline void *ioremap(unsigned long physaddr, unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
extern inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
extern inline void *ioremap_writethrough(unsigned long physaddr,
static inline void *ioremap_writethrough(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
extern inline void *ioremap_fullcache(unsigned long physaddr,
static inline void *ioremap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
......
......@@ -215,32 +215,32 @@ extern int psc_present;
* Access functions
*/
extern inline void psc_write_byte(int offset, __u8 data)
static inline void psc_write_byte(int offset, __u8 data)
{
*((volatile __u8 *)(psc + offset)) = data;
}
extern inline void psc_write_word(int offset, __u16 data)
static inline void psc_write_word(int offset, __u16 data)
{
*((volatile __u16 *)(psc + offset)) = data;
}
extern inline void psc_write_long(int offset, __u32 data)
static inline void psc_write_long(int offset, __u32 data)
{
*((volatile __u32 *)(psc + offset)) = data;
}
extern inline u8 psc_read_byte(int offset)
static inline u8 psc_read_byte(int offset)
{
return *((volatile __u8 *)(psc + offset));
}
extern inline u16 psc_read_word(int offset)
static inline u16 psc_read_word(int offset)
{
return *((volatile __u16 *)(psc + offset));
}
extern inline u32 psc_read_long(int offset)
static inline u32 psc_read_long(int offset)
{
return *((volatile __u32 *)(psc + offset));
}
......
......@@ -255,7 +255,8 @@ extern volatile __u8 *via1,*via2;
extern int rbv_present,via_alt_mapping;
extern __u8 rbv_clear;
extern __inline__ int rbv_set_video_bpp(int bpp) {
static inline int rbv_set_video_bpp(int bpp)
{
char val = (bpp==1)?0:(bpp==2)?1:(bpp==4)?2:(bpp==8)?3:-1;
if (!rbv_present || val<0) return -1;
via2[rMonP] = (via2[rMonP] & ~RBV_DEPTH) | val;
......
......@@ -13,8 +13,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#include <asm/page.h>
#include <asm/pgalloc.h>
extern inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
mm->context = virt_to_phys(mm->pgd);
return 0;
......@@ -22,7 +22,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
#define destroy_context(mm) do { } while(0)
extern inline void switch_mm_0230(struct mm_struct *mm)
static inline void switch_mm_0230(struct mm_struct *mm)
{
unsigned long crp[2] = {
0x80000000 | _PAGE_TABLE, mm->context
......@@ -55,7 +55,7 @@ extern inline void switch_mm_0230(struct mm_struct *mm)
asm volatile (".chip 68k");
}
extern inline void switch_mm_0460(struct mm_struct *mm)
static inline void switch_mm_0460(struct mm_struct *mm)
{
asm volatile (".chip 68040");
......@@ -91,7 +91,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
#define deactivate_mm(tsk,mm) do { } while (0)
extern inline void activate_mm(struct mm_struct *prev_mm,
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
next_mm->context = virt_to_phys(next_mm->pgd);
......@@ -144,7 +144,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
#define deactivate_mm(tsk,mm) do { } while (0)
extern inline void activate_mm(struct mm_struct *prev_mm,
static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
activate_context(next_mm);
......
......@@ -100,10 +100,13 @@ extern unsigned long mm_cachebits;
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
return pte;
}
extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
unsigned long *ptr = pmdp->pmd;
......@@ -114,8 +117,10 @@ extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
}
}
extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp); }
static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
}
#define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
......@@ -159,36 +164,40 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
extern inline int pte_read(pte_t pte) { return 1; }
extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); }
extern inline int pte_exec(pte_t pte) { return 1; }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); }
static inline int pte_exec(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_RONLY; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mknocache(pte_t pte)
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
static inline pte_t pte_rdprotect(pte_t pte) { return pte; }
static inline pte_t pte_exprotect(pte_t pte) { return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_RONLY; return pte; }
static inline pte_t pte_mkread(pte_t pte) { return pte; }
static inline pte_t pte_mkexec(pte_t pte) { return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mknocache(pte_t pte)
{
pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
return pte;
}
extern inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode; return pte; }
static inline pte_t pte_mkcache(pte_t pte)
{
pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
return pte;
}
#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
return mm->pgd + pgd_index(address);
}
......@@ -196,20 +205,20 @@ extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[128];
extern inline pgd_t * pgd_offset_k(unsigned long address)
static inline pgd_t *pgd_offset_k(unsigned long address)
{
return kernel_pg_dir + (address >> PGDIR_SHIFT);
}
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
return (pmd_t *)__pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}
/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * pmdp, unsigned long address)
static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
{
return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
......
......@@ -15,25 +15,25 @@
#define nubus_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define nubus_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
extern inline void *nubus_remap_nocache_ser(unsigned long physaddr,
static inline void *nubus_remap_nocache_ser(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
extern inline void *nubus_remap_nocache_nonser(unsigned long physaddr,
static inline void *nubus_remap_nocache_nonser(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_NONSER);
}
extern inline void *nbus_remap_writethrough(unsigned long physaddr,
static inline void *nbus_remap_writethrough(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
extern inline void *nubus_remap_fullcache(unsigned long physaddr,
static inline void *nubus_remap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
......
......@@ -110,7 +110,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
static inline int get_order(unsigned long size)
{
int order;
......
......@@ -37,12 +37,12 @@ struct pci_bus_info
#define pcibios_assign_all_busses() 0
extern inline void pcibios_set_master(struct pci_dev *dev)
static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
extern inline void pcibios_penalize_isa_irq(int irq)
static inline void pcibios_penalize_isa_irq(int irq)
{
/* We don't do dynamic PCI IRQ allocation */
}
......
......@@ -114,7 +114,7 @@ extern void *empty_zero_page;
* It makes no sense to consider whether we cross a memory boundary if
* we support just one physical chunk of memory.
*/
extern inline int mm_end_of_chunk (unsigned long addr, int len)
static inline int mm_end_of_chunk(unsigned long addr, int len)
{
return 0;
}
......@@ -129,8 +129,8 @@ extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
* tables contain all the necessary information. The Sun3 does, but
* they are updated on demand.
*/
extern inline void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte)
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t pte)
{
}
......
......@@ -18,14 +18,16 @@
#include <asm/fpu.h>
#include <asm/ptrace.h>
extern inline unsigned long rdusp(void) {
unsigned long usp;
static inline unsigned long rdusp(void)
{
unsigned long usp;
__asm__ __volatile__("move %/usp,%0" : "=a" (usp));
return usp;
}
extern inline void wrusp(unsigned long usp) {
static inline void wrusp(unsigned long usp)
{
__asm__ __volatile__("move %0,%/usp" : : "a" (usp));
}
......
......@@ -20,17 +20,17 @@ extern void *sparc_alloc_io (u32, void *, int, char *, u32, int);
/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */
/* No SBUS on the Sun3, kludge -- sam */
extern inline void _sbus_writeb(unsigned char val, unsigned long addr)
static inline void _sbus_writeb(unsigned char val, unsigned long addr)
{
*(volatile unsigned char *)addr = val;
}
extern inline unsigned char _sbus_readb(unsigned long addr)
static inline unsigned char _sbus_readb(unsigned long addr)
{
return *(volatile unsigned char *)addr;
}
extern inline void _sbus_writel(unsigned long val, unsigned long addr)
static inline void _sbus_writel(unsigned long val, unsigned long addr)
{
*(volatile unsigned long *)addr = val;
......
......@@ -52,7 +52,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
extern inline void sema_init (struct semaphore *sem, int val)
static inline void sema_init(struct semaphore *sem, int val)
{
*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}
......@@ -82,7 +82,7 @@ asmlinkage void __up(struct semaphore * sem);
* "down_failed" is a special asm handler that calls the C
* routine that actually waits. See arch/m68k/lib/semaphore.S
*/
extern inline void down(struct semaphore * sem)
static inline void down(struct semaphore *sem)
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
......@@ -104,7 +104,7 @@ extern inline void down(struct semaphore * sem)
: "memory");
}
extern inline int down_interruptible(struct semaphore * sem)
static inline int down_interruptible(struct semaphore *sem)
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
register int result __asm__ ("%d0");
......@@ -129,7 +129,7 @@ extern inline int down_interruptible(struct semaphore * sem)
return result;
}
extern inline int down_trylock(struct semaphore * sem)
static inline int down_trylock(struct semaphore *sem)
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
register int result __asm__ ("%d0");
......@@ -160,7 +160,7 @@ extern inline int down_trylock(struct semaphore * sem)
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
extern inline void up(struct semaphore * sem)
static inline void up(struct semaphore *sem)
{
register struct semaphore *sem1 __asm__ ("%a1") = sem;
......
......@@ -176,25 +176,25 @@ typedef struct sigaltstack {
#define __HAVE_ARCH_SIG_BITOPS
extern __inline__ void sigaddset(sigset_t *set, int _sig)
static inline void sigaddset(sigset_t *set, int _sig)
{
__asm__("bfset %0{%1,#1}" : "=m" (*set) : "id" ((_sig - 1) ^ 31)
: "cc");
}
extern __inline__ void sigdelset(sigset_t *set, int _sig)
static inline void sigdelset(sigset_t *set, int _sig)
{
__asm__("bfclr %0{%1,#1}" : "=m"(*set) : "id"((_sig - 1) ^ 31)
: "cc");
}
extern __inline__ int __const_sigismember(sigset_t *set, int _sig)
static inline int __const_sigismember(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
}
extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
static inline int __gen_sigismember(sigset_t *set, int _sig)
{
int ret;
__asm__("bfextu %1{%2,#1},%0"
......@@ -207,7 +207,7 @@ extern __inline__ int __gen_sigismember(sigset_t *set, int _sig)
__const_sigismember(set,sig) : \
__gen_sigismember(set,sig))
extern __inline__ int sigfindinword(unsigned long word)
static inline int sigfindinword(unsigned long word)
{
__asm__("bfffo %1{#0,#0},%0" : "=d"(word) : "d"(word & -word) : "cc");
return word ^ 31;
......
......@@ -82,7 +82,7 @@ static inline char * strchr(const char * s, int c)
#if 0
#define __HAVE_ARCH_STRPBRK
extern inline char * strpbrk(const char * cs,const char * ct)
static inline char *strpbrk(const char *cs,const char *ct)
{
const char *sc1,*sc2;
......@@ -530,7 +530,8 @@ extern int memcmp(const void * ,const void * ,size_t );
memcmp((cs),(ct),(n)))
#define __HAVE_ARCH_MEMCHR
extern inline void * memchr(const void * cs, int c, size_t count) {
static inline void *memchr(const void *cs, int c, size_t count)
{
/* Someone else can optimize this, I don't care - tonym@mac.linux-m68k.org */
unsigned char *ret = (unsigned char *)cs;
for(;count>0;count--,ret++)
......
......@@ -103,22 +103,27 @@
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
extern inline pte_t pte_modify (pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & SUN3_PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & SUN3_PAGE_CHG_MASK) | pgprot_val(newprot);
return pte;
}
#define pmd_set(pmdp,ptep) do {} while (0)
extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = virt_to_phys(pmdp); }
static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
pgd_val(*pgdp) = virt_to_phys(pmdp);
}
#define __pte_page(pte) \
((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
#define __pmd_page(pmd) \
((unsigned long) __va (pmd_val (pmd) & PAGE_MASK))
extern inline int pte_none (pte_t pte) { return !pte_val (pte); }
extern inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; }
extern inline void pte_clear (pte_t *ptep) { pte_val (*ptep) = 0; }
static inline int pte_none (pte_t pte) { return !pte_val (pte); }
static inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; }
static inline void pte_clear (pte_t *ptep) { pte_val (*ptep) = 0; }
#define pte_pfn(pte) (pte_val(pte) & SUN3_PAGE_PGNUM_MASK)
#define pfn_pte(pfn, pgprot) \
......@@ -128,20 +133,20 @@ extern inline void pte_clear (pte_t *ptep) { pte_val (*ptep) = 0; }
#define pmd_page(pmd) (mem_map+((__pmd_page(pmd) - PAGE_OFFSET) >> PAGE_SHIFT))
extern inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
//extern inline int pmd_bad (pmd_t pmd) { return (pmd_val (pmd) & SUN3_PMD_MASK) != SUN3_PMD_MAGIC; }
extern inline int pmd_bad2 (pmd_t *pmd) { return 0; }
//static inline int pmd_bad (pmd_t pmd) { return (pmd_val (pmd) & SUN3_PMD_MASK) != SUN3_PMD_MAGIC; }
static inline int pmd_bad2 (pmd_t *pmd) { return 0; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
extern inline int pmd_present2 (pmd_t *pmd) { return pmd_val (*pmd) & SUN3_PMD_VALID; }
static inline int pmd_present2 (pmd_t *pmd) { return pmd_val (*pmd) & SUN3_PMD_VALID; }
/* #define pmd_present(pmd) pmd_present2(&(pmd)) */
#define pmd_present(pmd) (!pmd_none2(&(pmd)))
extern inline void pmd_clear (pmd_t *pmdp) { pmd_val (*pmdp) = 0; }
static inline void pmd_clear (pmd_t *pmdp) { pmd_val (*pmdp) = 0; }
extern inline int pgd_none (pgd_t pgd) { return 0; }
extern inline int pgd_bad (pgd_t pgd) { return 0; }
extern inline int pgd_present (pgd_t pgd) { return 1; }
extern inline void pgd_clear (pgd_t *pgdp) {}
static inline int pgd_none (pgd_t pgd) { return 0; }
static inline int pgd_bad (pgd_t pgd) { return 0; }
static inline int pgd_present (pgd_t pgd) { return 1; }
static inline void pgd_clear (pgd_t *pgdp) {}
#define pte_ERROR(e) \
......@@ -157,28 +162,28 @@ extern inline void pgd_clear (pgd_t *pgdp) {}
* Undefined behaviour if not...
* [we have the full set here even if they don't change from m68k]
*/
extern inline int pte_read(pte_t pte) { return 1; }
extern inline int pte_write(pte_t pte) { return pte_val(pte) & SUN3_PAGE_WRITEABLE; }
extern inline int pte_exec(pte_t pte) { return 1; }
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & SUN3_PAGE_MODIFIED; }
extern inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
extern inline int pte_file(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte) { return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= SUN3_PAGE_WRITEABLE; return pte; }
extern inline pte_t pte_mkread(pte_t pte) { return pte; }
extern inline pte_t pte_mkexec(pte_t pte) { return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= SUN3_PAGE_MODIFIED; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= SUN3_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) |= SUN3_PAGE_NOCACHE; return pte; }
static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & SUN3_PAGE_WRITEABLE; }
static inline int pte_exec(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & SUN3_PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
static inline pte_t pte_rdprotect(pte_t pte) { return pte; }
static inline pte_t pte_exprotect(pte_t pte) { return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= SUN3_PAGE_WRITEABLE; return pte; }
static inline pte_t pte_mkread(pte_t pte) { return pte; }
static inline pte_t pte_mkexec(pte_t pte) { return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= SUN3_PAGE_MODIFIED; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= SUN3_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) |= SUN3_PAGE_NOCACHE; return pte; }
// use this version when caches work...
//extern inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; }
//static inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; }
// until then, use:
extern inline pte_t pte_mkcache(pte_t pte) { return pte; }
static inline pte_t pte_mkcache(pte_t pte) { return pte; }
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
......@@ -193,7 +198,7 @@ extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the second-level pagetable. */
extern inline pmd_t *pmd_offset (pgd_t *pgd, unsigned long address)
static inline pmd_t *pmd_offset (pgd_t *pgd, unsigned long address)
{
return (pmd_t *) pgd;
}
......
......@@ -69,7 +69,7 @@
#ifndef __ASSEMBLY__
/* Read bus error status register (implicitly clearing it). */
extern __inline__ unsigned char sun3_get_buserr (void)
static inline unsigned char sun3_get_buserr(void)
{
unsigned char sfc, c;
......@@ -82,7 +82,7 @@ extern __inline__ unsigned char sun3_get_buserr (void)
}
/* Read segmap from hardware MMU. */
extern __inline__ unsigned long sun3_get_segmap (unsigned long addr)
static inline unsigned long sun3_get_segmap(unsigned long addr)
{
register unsigned long entry;
unsigned char c, sfc;
......@@ -97,7 +97,7 @@ extern __inline__ unsigned long sun3_get_segmap (unsigned long addr)
}
/* Write segmap to hardware MMU. */
extern __inline__ void sun3_put_segmap (unsigned long addr, unsigned long entry)
static inline void sun3_put_segmap(unsigned long addr, unsigned long entry)
{
unsigned char sfc;
......@@ -110,7 +110,7 @@ extern __inline__ void sun3_put_segmap (unsigned long addr, unsigned long entry)
}
/* Read PTE from hardware MMU. */
extern __inline__ unsigned long sun3_get_pte (unsigned long addr)
static inline unsigned long sun3_get_pte(unsigned long addr)
{
register unsigned long entry;
unsigned char sfc;
......@@ -124,7 +124,7 @@ extern __inline__ unsigned long sun3_get_pte (unsigned long addr)
}
/* Write PTE to hardware MMU. */
extern __inline__ void sun3_put_pte (unsigned long addr, unsigned long entry)
static inline void sun3_put_pte(unsigned long addr, unsigned long entry)
{
unsigned char sfc;
......@@ -137,7 +137,7 @@ extern __inline__ void sun3_put_pte (unsigned long addr, unsigned long entry)
}
/* get current context */
extern __inline__ unsigned char sun3_get_context(void)
static inline unsigned char sun3_get_context(void)
{
unsigned char sfc, c;
......@@ -150,7 +150,7 @@ extern __inline__ unsigned char sun3_get_context(void)
}
/* set alternate context */
extern __inline__ void sun3_put_context(unsigned char c)
static inline void sun3_put_context(unsigned char c)
{
unsigned char dfc;
GET_DFC(dfc);
......
......@@ -87,7 +87,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
flush_tlb_all();
}
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
......@@ -214,7 +214,7 @@ static inline void flush_tlb_kernel_page (unsigned long addr)
sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
......
......@@ -14,7 +14,7 @@
/* We let the MMU do all checking */
#define access_ok(type,addr,size) 1
extern inline int verify_area(int type, const void * addr, unsigned long size)
static inline int verify_area(int type, const void *addr, unsigned long size)
{
return access_ok(type,addr,size)?0:-EFAULT;
}
......
......@@ -22,12 +22,12 @@
extern unsigned long mm_vtop(unsigned long addr) __attribute__ ((const));
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
#else
extern inline unsigned long mm_vtop(unsigned long vaddr)
static inline unsigned long mm_vtop(unsigned long vaddr)
{
return __pa(vaddr);
}
extern inline unsigned long mm_ptov(unsigned long paddr)
static inline unsigned long mm_ptov(unsigned long paddr)
{
return (unsigned long)__va(paddr);
}
......
......@@ -15,25 +15,25 @@
#define z_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define z_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
extern inline void *z_remap_nocache_ser(unsigned long physaddr,
unsigned long size)
static inline void *z_remap_nocache_ser(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
extern inline void *z_remap_nocache_nonser(unsigned long physaddr,
unsigned long size)
static inline void *z_remap_nocache_nonser(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_NONSER);
}
extern inline void *z_remap_writethrough(unsigned long physaddr,
unsigned long size)
static inline void *z_remap_writethrough(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
extern inline void *z_remap_fullcache(unsigned long physaddr,
unsigned long size)
static inline void *z_remap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
......