Commit e51886ef authored by Hirokazu Takata, committed by Linus Torvalds

[PATCH] m32r: Make kernel headers for mutual exclusion

Here is a patch that updates the m32r kernel headers for mutual exclusion
(atomic.h, bitops.h and semaphore.h) so that they can be published to
userland.

	* include/asm-m32r/assembler.h (M32R_LOCK, M32R_UNLOCK):
	  Define the M32R_LOCK and M32R_UNLOCK macros.  For an SMP
	  configuration these expand to the m32r LOCK and UNLOCK
	  instructions; for a UP configuration they simply expand to the
	  LD (load) and ST (store) instructions, respectively (see the
	  sketch after this list).

	* include/asm-m32r/atomic.h, include/asm-m32r/bitops.h,
	  include/asm-m32r/semaphore.h:
	  - Rename the LOAD and STORE macros to M32R_LOCK and M32R_UNLOCK,
	    respectively, because LOAD and STORE are too generic as names.
	  - Change inline to __inline__.
	    Use the __inline__ modifier for functions placed outside the
	    __KERNEL__ region of these headers, because those functions
	    might be included and used from ISO C programs in userland.
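
For reference, here is a minimal sketch of how the new macros read from C,
condensed from the assembler.h and atomic.h hunks below (the DCACHE_CLEAR()
step and the kernel-doc comments of the real code are omitted for brevity):

/* When included from C, __STR() stringizes its argument, so the macros
 * become string literals usable in inline asm: */
#ifdef CONFIG_SMP
#define M32R_LOCK	__STR(lock)	/* -> "lock"  : load and lock (SMP)   */
#define M32R_UNLOCK	__STR(unlock)	/* -> "unlock": store and unlock (SMP) */
#else
#define M32R_LOCK	__STR(ld)	/* -> "ld"    : plain load (UP)       */
#define M32R_UNLOCK	__STR(st)	/* -> "st"    : plain store (UP)      */
#endif

/* A locked read-modify-write then looks like atomic_add_return() below: */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int result;

	local_irq_save(flags);
	__asm__ __volatile__ (
		M32R_LOCK" %0, @%1; \n\t"	/* load v->counter        */
		"add %0, %2; \n\t"		/* add i                  */
		M32R_UNLOCK" %0, @%1; \n\t"	/* store the sum back     */
		: "=&r" (result)
		: "r" (&v->counter), "r" (i)
		: "memory"
	);
	local_irq_restore(flags);

	return result;
}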

Currently these headers can already be included from userland.  They are
kernel headers, but I think they provide useful definitions and functions
for userland applications as well, as the sketch below illustrates.
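
To illustrate why __inline__ matters for such userland use, here is a toy
header fragment (hypothetical, not part of this patch):

/* toy_bitops.h - hypothetical example, not from this patch */
static __inline__ int toy_test_bit(int nr, const unsigned int *addr)
{
	unsigned int mask = 1U << (nr & 31);	/* bit within a 32-bit word */

	return (addr[nr >> 5] & mask) != 0;	/* pick the word, test the bit */
}

gcc -ansi -pedantic accepts this, because __inline__ is a GNU C keyword
that remains available in strict ISO C90 mode; writing "static inline"
instead would be rejected, since "inline" only became a keyword in C99.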
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c5ffcfc6
include/asm-m32r/assembler.h
 #ifndef _ASM_M32R_ASSEMBLER_H
 #define _ASM_M32R_ASSEMBLER_H
-/* $Id$ */
 /*
  * linux/asm-m32r/assembler.h
  *
- * This file contains M32R architecture specific defines.
+ * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
  *
- * Do not include any C declarations in this file - it is included by
- * assembler source.
+ * This file contains M32R architecture specific macro definitions.
  */
 #include <linux/config.h>
+#ifndef __STR
+#ifdef __ASSEMBLY__
+#define __STR(x) x
+#else
+#define __STR(x) #x
+#endif
+#endif /* __STR */
+#ifdef CONFIG_SMP
+#define M32R_LOCK __STR(lock)
+#define M32R_UNLOCK __STR(unlock)
+#else
+#define M32R_LOCK __STR(ld)
+#define M32R_UNLOCK __STR(st)
+#endif
+#ifdef __ASSEMBLY__
 #undef ENTRY
 #define ENTRY(name) ENTRY_M name
 	.macro ENTRY_M name
@@ -22,12 +35,13 @@
 	ALIGN
 \name:
 	.endm
+#endif
-/*
- * LDIMM: load immediate value
- *
- * STI: enable interruption
- * CLI: disable interruption
+/**
+ * LDIMM - load immediate value
+ * STI - enable interruption
+ * CLI - disable interruption
  */
 #ifdef __ASSEMBLY__
@@ -209,4 +223,3 @@
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_M32R_ASSEMBLER_H */
include/asm-m32r/atomic.h
@@ -10,6 +10,7 @@
  */
 #include <linux/config.h>
+#include <asm/assembler.h>
 #include <asm/system.h>
@@ -17,16 +18,6 @@
  * resource counting etc..
  */
-#undef LOAD
-#undef STORE
-#ifdef CONFIG_SMP
-#define LOAD "lock"
-#define STORE "unlock"
-#else
-#define LOAD "ld"
-#define STORE "st"
-#endif
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
@@ -60,7 +51,7 @@ typedef struct { volatile int counter; } atomic_t;
  *
  * Atomically adds @i to @v and return (@i + @v).
  */
-static inline int atomic_add_return(int i, atomic_t *v)
+static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
 	int result;
@@ -69,9 +60,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	__asm__ __volatile__ (
 		"# atomic_add_return \n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"add %0, %2; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (result)
 		: "r" (&v->counter), "r" (i)
 		: "memory"
@@ -91,7 +82,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and return (@v - @i).
  */
-static inline int atomic_sub_return(int i, atomic_t *v)
+static __inline__ int atomic_sub_return(int i, atomic_t *v)
 {
 	unsigned long flags;
 	int result;
@@ -100,9 +91,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	__asm__ __volatile__ (
 		"# atomic_sub_return \n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"sub %0, %2; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (result)
 		: "r" (&v->counter), "r" (i)
 		: "memory"
@@ -150,7 +141,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
  *
  * Atomically increments @v by 1 and returns the result.
  */
-static inline int atomic_inc_return(atomic_t *v)
+static __inline__ int atomic_inc_return(atomic_t *v)
 {
 	unsigned long flags;
 	int result;
@@ -159,9 +150,9 @@ static inline int atomic_inc_return(atomic_t *v)
 	__asm__ __volatile__ (
 		"# atomic_inc_return \n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"addi %0, #1; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (result)
 		: "r" (&v->counter)
 		: "memory"
@@ -180,7 +171,7 @@ static inline int atomic_inc_return(atomic_t *v)
  *
  * Atomically decrements @v by 1 and returns the result.
  */
-static inline int atomic_dec_return(atomic_t *v)
+static __inline__ int atomic_dec_return(atomic_t *v)
 {
 	unsigned long flags;
 	int result;
@@ -189,9 +180,9 @@ static inline int atomic_dec_return(atomic_t *v)
 	__asm__ __volatile__ (
 		"# atomic_dec_return \n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
		"addi %0, #-1; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (result)
 		: "r" (&v->counter)
 		: "memory"
@@ -251,7 +242,7 @@ static inline int atomic_dec_return(atomic_t *v)
  */
 #define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *addr)
+static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 {
 	unsigned long flags;
 	unsigned long tmp;
@@ -260,9 +251,9 @@ static inline void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 	__asm__ __volatile__ (
 		"# atomic_clear_mask \n\t"
 		DCACHE_CLEAR("%0", "r5", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"and %0, %2; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (tmp)
 		: "r" (addr), "r" (~mask)
 		: "memory"
@@ -273,7 +264,7 @@ static inline void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 	local_irq_restore(flags);
 }
-static inline void atomic_set_mask(unsigned long mask, atomic_t *addr)
+static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 {
 	unsigned long flags;
 	unsigned long tmp;
@@ -282,9 +273,9 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *addr)
 	__asm__ __volatile__ (
 		"# atomic_set_mask \n\t"
 		DCACHE_CLEAR("%0", "r5", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"or %0, %2; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (tmp)
 		: "r" (addr), "r" (mask)
 		: "memory"
@@ -302,4 +293,3 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *addr)
 #define smp_mb__after_atomic_inc() barrier()
 #endif /* _ASM_M32R_ATOMIC_H */
include/asm-m32r/bitops.h
@@ -13,6 +13,7 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
+#include <asm/assembler.h>
 #include <asm/system.h>
 #include <asm/byteorder.h>
 #include <asm/types.h>
@@ -25,18 +26,6 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
-#undef LOAD
-#undef STORE
-#ifdef CONFIG_SMP
-#define LOAD "lock"
-#define STORE "unlock"
-#else
-#define LOAD "ld"
-#define STORE "st"
-#endif
-/* #define ADDR (*(volatile long *) addr) */
 /**
  * set_bit - Atomically set a bit in memory
  * @nr: the bit to set
@@ -47,7 +36,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(int nr, volatile void * addr)
+static __inline__ void set_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -60,9 +49,9 @@ static inline void set_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "r6", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"or %0, %2; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (tmp)
 		: "r" (a), "r" (mask)
 		: "memory"
@@ -82,7 +71,7 @@ static inline void set_bit(int nr, volatile void * addr)
  * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
-static inline void __set_bit(int nr, volatile void * addr)
+static __inline__ void __set_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -102,7 +91,7 @@ static inline void __set_bit(int nr, volatile void * addr)
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
-static inline void clear_bit(int nr, volatile void * addr)
+static __inline__ void clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -116,9 +105,9 @@ static inline void clear_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "r6", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"and %0, %2; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (tmp)
 		: "r" (a), "r" (~mask)
 		: "memory"
@@ -129,7 +118,7 @@ static inline void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
-static inline void __clear_bit(int nr, volatile unsigned long * addr)
+static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
 {
 	unsigned long mask;
 	volatile unsigned long *a = addr;
@@ -151,7 +140,7 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
-static inline void __change_bit(int nr, volatile void * addr)
+static __inline__ void __change_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -170,7 +159,7 @@ static inline void __change_bit(int nr, volatile void * addr)
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
-static inline void change_bit(int nr, volatile void * addr)
+static __inline__ void change_bit(int nr, volatile void * addr)
 {
 	__u32 mask;
 	volatile __u32 *a = addr;
@@ -183,9 +172,9 @@ static inline void change_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "r6", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"xor %0, %2; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (tmp)
 		: "r" (a), "r" (mask)
 		: "memory"
@@ -204,7 +193,7 @@ static inline void change_bit(int nr, volatile void * addr)
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -217,11 +206,11 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "%1", "%2")
-		LOAD" %0, @%2; \n\t"
+		M32R_LOCK" %0, @%2; \n\t"
 		"mv %1, %0; \n\t"
 		"and %0, %3; \n\t"
 		"or %1, %3; \n\t"
-		STORE" %1, @%2; \n\t"
+		M32R_UNLOCK" %1, @%2; \n\t"
 		: "=&r" (oldbit), "=&r" (tmp)
 		: "r" (a), "r" (mask)
 		: "memory"
@@ -240,7 +229,7 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
-static inline int __test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -261,7 +250,7 @@ static inline int __test_and_set_bit(int nr, volatile void * addr)
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -275,12 +264,12 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "%1", "%3")
-		LOAD" %0, @%3; \n\t"
+		M32R_LOCK" %0, @%3; \n\t"
 		"mv %1, %0; \n\t"
 		"and %0, %2; \n\t"
 		"not %2, %2; \n\t"
 		"and %1, %2; \n\t"
-		STORE" %1, @%3; \n\t"
+		M32R_UNLOCK" %1, @%3; \n\t"
 		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
 		: "r" (a)
 		: "memory"
@@ -299,7 +288,7 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
-static inline int __test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -313,7 +302,7 @@ static inline int __test_and_clear_bit(int nr, volatile void * addr)
 }
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -334,7 +323,7 @@ static inline int __test_and_change_bit(int nr, volatile void * addr)
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
 {
 	__u32 mask, oldbit;
 	volatile __u32 *a = addr;
@@ -347,11 +336,11 @@ static inline int test_and_change_bit(int nr, volatile void * addr)
 	local_irq_save(flags);
 	__asm__ __volatile__ (
 		DCACHE_CLEAR("%0", "%1", "%2")
-		LOAD" %0, @%2; \n\t"
+		M32R_LOCK" %0, @%2; \n\t"
 		"mv %1, %0; \n\t"
 		"and %0, %3; \n\t"
 		"xor %1, %3; \n\t"
-		STORE" %1, @%2; \n\t"
+		M32R_UNLOCK" %1, @%2; \n\t"
 		: "=&r" (oldbit), "=&r" (tmp)
 		: "r" (a), "r" (mask)
 		: "memory"
@@ -361,16 +350,12 @@ static inline int test_and_change_bit(int nr, volatile void * addr)
 	return (oldbit != 0);
 }
-#if 0 /* Fool kernel-doc since it doesn't do macros yet */
 /**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
-static int test_bit(int nr, const volatile void * addr);
-#endif
-static inline int test_bit(int nr, const volatile void * addr)
+static __inline__ int test_bit(int nr, const volatile void * addr)
 {
 	__u32 mask;
 	const volatile __u32 *a = addr;
@@ -387,7 +372,7 @@ static inline int test_bit(int nr, const volatile void * addr)
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
-static inline unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
 {
 	int k;
@@ -420,7 +405,7 @@ static inline unsigned long ffz(unsigned long word)
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
-static inline int find_next_zero_bit(void *addr, int size, int offset)
+static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
 {
 	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
 	unsigned long result = offset & ~31UL;
@@ -462,7 +447,7 @@ static inline int find_next_zero_bit(void *addr, int size, int offset)
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
-static inline unsigned long __ffs(unsigned long word)
+static __inline__ unsigned long __ffs(unsigned long word)
 {
 	int k = 0;
include/asm-m32r/semaphore.h
@@ -15,19 +15,10 @@
 #include <linux/config.h>
 #include <linux/wait.h>
 #include <linux/rwsem.h>
+#include <asm/assembler.h>
 #include <asm/system.h>
 #include <asm/atomic.h>
-#undef LOAD
-#undef STORE
-#ifdef CONFIG_SMP
-#define LOAD "lock"
-#define STORE "unlock"
-#else
-#define LOAD "ld"
-#define STORE "st"
-#endif
 struct semaphore {
 	atomic_t count;
 	int sleepers;
@@ -97,9 +88,9 @@ static inline void down(struct semaphore * sem)
 	__asm__ __volatile__ (
 		"# down \n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"addi %0, #-1; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (count)
 		: "r" (&sem->count)
 		: "memory"
@@ -128,9 +119,9 @@ static inline int down_interruptible(struct semaphore * sem)
 	__asm__ __volatile__ (
 		"# down_interruptible \n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"addi %0, #-1; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (count)
 		: "r" (&sem->count)
 		: "memory"
@@ -160,9 +151,9 @@ static inline int down_trylock(struct semaphore * sem)
 	__asm__ __volatile__ (
 		"# down_trylock \n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"addi %0, #-1; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (count)
 		: "r" (&sem->count)
 		: "memory"
@@ -193,9 +184,9 @@ static inline void up(struct semaphore * sem)
 	__asm__ __volatile__ (
 		"# up \n\t"
 		DCACHE_CLEAR("%0", "r4", "%1")
-		LOAD" %0, @%1; \n\t"
+		M32R_LOCK" %0, @%1; \n\t"
 		"addi %0, #1; \n\t"
-		STORE" %0, @%1; \n\t"
+		M32R_UNLOCK" %0, @%1; \n\t"
 		: "=&r" (count)
 		: "r" (&sem->count)
 		: "memory"