Commit 23f88fe4 authored by Adrian Bunk, committed by Linus Torvalds

[PATCH] include/asm-v850/ "extern inline" -> "static inline"

"extern inline" doesn't make much sense.
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Miles Bader <miles@gnu.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 26d89d1e
......@@ -31,7 +31,7 @@ typedef struct { int counter; } atomic_t;
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
extern __inline__ int atomic_add_return (int i, volatile atomic_t *v)
static inline int atomic_add_return (int i, volatile atomic_t *v)
{
unsigned long flags;
int res;
......
......@@ -30,7 +30,7 @@
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
extern __inline__ unsigned long ffz (unsigned long word)
static inline unsigned long ffz (unsigned long word)
{
unsigned long result = 0;
......@@ -135,7 +135,7 @@ extern __inline__ unsigned long ffz (unsigned long word)
"m" (*((const char *)(addr) + ((nr) >> 3)))); \
__test_bit_res; \
})
extern __inline__ int __test_bit (int nr, const void *addr)
static inline int __test_bit (int nr, const void *addr)
{
int res;
__asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0"
......@@ -157,7 +157,7 @@ extern __inline__ int __test_bit (int nr, const void *addr)
#define find_first_zero_bit(addr, size) \
find_next_zero_bit ((addr), (size), 0)
extern __inline__ int find_next_zero_bit(const void *addr, int size, int offset)
static inline int find_next_zero_bit(const void *addr, int size, int offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
......
......@@ -16,7 +16,7 @@
#include <asm/param.h>
extern __inline__ void __delay(unsigned long loops)
static inline void __delay(unsigned long loops)
{
if (loops)
__asm__ __volatile__ ("1: add -1, %0; bnz 1b"
......@@ -33,7 +33,7 @@ extern __inline__ void __delay(unsigned long loops)
extern unsigned long loops_per_jiffy;
extern __inline__ void udelay(unsigned long usecs)
static inline void udelay(unsigned long usecs)
{
register unsigned long full_loops, part_loops;
......
#ifndef __V850_HW_IRQ_H__
#define __V850_HW_IRQ_H__
extern inline void hw_resend_irq (struct hw_interrupt_type *h, unsigned int i)
/* Resend hardware interrupt I via controller H.  No-op on this port:
   the body is intentionally empty — presumably the v850 interrupt
   controller needs no software-triggered resend (TODO confirm against
   the arch IRQ code).  */
static inline void hw_resend_irq (struct hw_interrupt_type *h, unsigned int i)
{
}
......
......@@ -59,7 +59,7 @@ struct thread_struct {
/* Do necessary setup to start up a newly executed thread. */
extern inline void start_thread (struct pt_regs *regs,
static inline void start_thread (struct pt_regs *regs,
unsigned long pc, unsigned long usp)
{
regs->pc = pc;
......@@ -68,7 +68,7 @@ extern inline void start_thread (struct pt_regs *regs,
}
/* Free all resources held by a thread. */
extern inline void release_thread (struct task_struct *dead_task)
static inline void release_thread (struct task_struct *dead_task)
{
	/* Intentionally empty: no per-architecture thread resources are
	   visible here to release for DEAD_TASK.  */
}
......
......@@ -24,7 +24,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC (name,0)
extern inline void sema_init (struct semaphore *sem, int val)
/* Initialize semaphore SEM to initial count VAL by assigning the
   static-initializer value over the existing object.  The compound
   literal re-runs __SEMAPHORE_INITIALIZER on *sem itself, so any
   embedded self-references (e.g. a wait-queue head) stay correct.  */
static inline void sema_init (struct semaphore *sem, int val)
{
*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
}
......@@ -52,14 +52,14 @@ extern int __down_interruptible (struct semaphore * sem);
extern int __down_trylock (struct semaphore * sem);
extern void __up (struct semaphore * sem);
extern inline void down (struct semaphore * sem)
/* Acquire semaphore SEM, blocking until it is available.  May sleep,
   so it must not be called from atomic context — might_sleep() is the
   debug check for exactly that.  */
static inline void down (struct semaphore * sem)
{
might_sleep();
/* Fast path: atomically decrement the count.  A negative result means
   the semaphore was already held, so enter the contended slow path.  */
if (atomic_dec_return (&sem->count) < 0)
__down (sem);
}
extern inline int down_interruptible (struct semaphore * sem)
static inline int down_interruptible (struct semaphore * sem)
{
int ret = 0;
might_sleep();
......@@ -68,7 +68,7 @@ extern inline int down_interruptible (struct semaphore * sem)
return ret;
}
extern inline int down_trylock (struct semaphore *sem)
static inline int down_trylock (struct semaphore *sem)
{
int ret = 0;
if (atomic_dec_return (&sem->count) < 0)
......@@ -76,7 +76,7 @@ extern inline int down_trylock (struct semaphore *sem)
return ret;
}
extern inline void up (struct semaphore * sem)
static inline void up (struct semaphore * sem)
{
if (atomic_inc_return (&sem->count) <= 0)
__up (sem);
......
......@@ -81,7 +81,7 @@ static inline int irqs_disabled (void)
((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr))))
#define tas(ptr) (xchg ((ptr), 1))
extern inline unsigned long __xchg (unsigned long with,
static inline unsigned long __xchg (unsigned long with,
__volatile__ void *ptr, int size)
{
unsigned long tmp, flags;
......
......@@ -56,12 +56,12 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
BUG ();
}
extern inline void flush_tlb_kernel_page(unsigned long addr)
/* Flush the TLB entry for kernel address ADDR.  Unconditionally a bug
   if reached on this port — presumably there is no TLB to flush here
   (TODO confirm: v850 appears to be a no-MMU port).  */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
BUG ();
}
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
BUG ();
......
......@@ -14,7 +14,7 @@
#define VERIFY_READ 0
#define VERIFY_WRITE 1
extern inline int access_ok (int type, const void *addr, unsigned long size)
static inline int access_ok (int type, const void *addr, unsigned long size)
{
/* XXX I guess we should check against real ram bounds at least, and
possibly make sure ADDR is not within the kernel.
......
......@@ -82,19 +82,19 @@ extern int __bug_unaligned_x(void *ptr);
})
extern inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
/* Store the low 16 bits of __v at the possibly-unaligned address __p,
   one byte at a time, little-endian (low byte first).  */
static inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
{
	__p[0] = __v;		/* bits 7..0  */
	__p[1] = __v >> 8;	/* bits 15..8 */
}
extern inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
/* Store all 32 bits of __v at the possibly-unaligned address __p as
   two little-endian 16-bit stores: low half at __p, high half at
   __p + 2.  The halves touch disjoint bytes, so store order does not
   matter.  */
static inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
{
	__put_unaligned_2(__v, __p);		/* bytes 0-1: low half  */
	__put_unaligned_2(__v >> 16, __p + 2);	/* bytes 2-3: high half */
}
extern inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p)
static inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p)
{
/*
* tradeoff: 8 bytes of stack for all unaligned puts (2
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment