Commit e6155e6b authored by Hirokazu Takata, committed by Linus Torvalds

[PATCH] m32r: change to use temporary register variables

I made a patch to update some header files for m32r.

- Change to use temporary register variables allocated by the compiler,
  instead of fixed register variables (see the sketch below).
- Change __inline__ to inline.
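
As a minimal, hypothetical sketch of the change (the function names and the
stripped-down asm bodies are illustrative only, not code taken from this
patch; the local_irq_save/restore wrapping and the DCACHE_CLEAR workaround
are omitted for brevity): the old style hard-codes a scratch register such
as r4 in the asm template and must list it in the clobbers, while the new
style declares a C temporary and binds it to an early-clobber output
operand ("=&r") so the compiler allocates whichever register is free.

/* Old pattern: fixed scratch register, listed as a clobber. */
static inline void rmw_or_old(volatile unsigned int *addr, unsigned int mask)
{
	__asm__ __volatile__ (
		"ld	r4, @%0;	\n\t"
		"or	r4, %1;		\n\t"
		"st	r4, @%0;	\n\t"
		: /* no outputs */
		: "r" (addr), "r" (mask)
		: "memory", "r4"
	);
}

/* New pattern: compiler-allocated temporary via an early-clobber output. */
static inline void rmw_or_new(volatile unsigned int *addr, unsigned int mask)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"ld	%0, @%1;	\n\t"
		"or	%0, %2;		\n\t"
		"st	%0, @%1;	\n\t"
		: "=&r" (tmp)			/* scratch register chosen by the compiler */
		: "r" (addr), "r" (mask)
		: "memory"
	);
}

The "&" (early clobber) is needed because the temporary is written before
the input operands have been fully consumed, so the compiler must not assign
it the same register as %1 or %2.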
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 20f3a7cb
#ifndef _ASM_M32R_BITOPS_H #ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H #define _ASM_M32R_BITOPS_H
/* $Id$ */
/* /*
* linux/include/asm-m32r/bitops.h * linux/include/asm-m32r/bitops.h
* orig : i386 2.4.10
* *
* Copyright 1992, Linus Torvalds. * Copyright 1992, Linus Torvalds.
* *
* M32R version: * M32R version:
* Copyright (C) 2001, 2002 Hitoshi Yamamoto * Copyright (C) 2001, 2002 Hitoshi Yamamoto
* Copyright (C) 2004 Hirokazu Takata * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -50,24 +47,25 @@ ...@@ -50,24 +47,25 @@
* Note that @nr may be almost arbitrarily large; this function is not * Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity. * restricted to acting on a single-word quantity.
*/ */
static __inline__ void set_bit(int nr, volatile void * addr) static inline void set_bit(int nr, volatile void * addr)
{ {
__u32 mask; __u32 mask;
volatile __u32 *a = addr; volatile __u32 *a = addr;
unsigned long flags; unsigned long flags;
unsigned long tmp;
a += (nr >> 5); a += (nr >> 5);
mask = (1 << (nr & 0x1F)); mask = (1 << (nr & 0x1F));
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%1")
LOAD" r4, @%0; \n\t" LOAD" %0, @%1; \n\t"
"or r4, %1; \n\t" "or %0, %2; \n\t"
STORE" r4, @%0; \n\t" STORE" %0, @%1; \n\t"
: /* no outputs */ : "=&r" (tmp)
: "r" (a), "r" (mask) : "r" (a), "r" (mask)
: "memory", "r4" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
...@@ -84,7 +82,7 @@ static __inline__ void set_bit(int nr, volatile void * addr) ...@@ -84,7 +82,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
* If it's called on the same region of memory simultaneously, the effect * If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds. * may be that only one operation succeeds.
*/ */
static __inline__ void __set_bit(int nr, volatile void * addr) static inline void __set_bit(int nr, volatile void * addr)
{ {
__u32 mask; __u32 mask;
volatile __u32 *a = addr; volatile __u32 *a = addr;
...@@ -104,11 +102,12 @@ static __inline__ void __set_bit(int nr, volatile void * addr) ...@@ -104,11 +102,12 @@ static __inline__ void __set_bit(int nr, volatile void * addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors. * in order to ensure changes are visible on other processors.
*/ */
static __inline__ void clear_bit(int nr, volatile void * addr) static inline void clear_bit(int nr, volatile void * addr)
{ {
__u32 mask; __u32 mask;
volatile __u32 *a = addr; volatile __u32 *a = addr;
unsigned long flags; unsigned long flags;
unsigned long tmp;
a += (nr >> 5); a += (nr >> 5);
mask = (1 << (nr & 0x1F)); mask = (1 << (nr & 0x1F));
...@@ -116,13 +115,13 @@ static __inline__ void clear_bit(int nr, volatile void * addr) ...@@ -116,13 +115,13 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%1")
LOAD" r4, @%0; \n\t" LOAD" %0, @%1; \n\t"
"and r4, %1; \n\t" "and %0, %2; \n\t"
STORE" r4, @%0; \n\t" STORE" %0, @%1; \n\t"
: /* no outputs */ : "=&r" (tmp)
: "r" (a), "r" (~mask) : "r" (a), "r" (~mask)
: "memory", "r4" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
...@@ -130,7 +129,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr) ...@@ -130,7 +129,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
local_irq_restore(flags); local_irq_restore(flags);
} }
static __inline__ void __clear_bit(int nr, volatile unsigned long * addr) static inline void __clear_bit(int nr, volatile unsigned long * addr)
{ {
unsigned long mask; unsigned long mask;
volatile unsigned long *a = addr; volatile unsigned long *a = addr;
...@@ -152,7 +151,7 @@ static __inline__ void __clear_bit(int nr, volatile unsigned long * addr) ...@@ -152,7 +151,7 @@ static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
* If it's called on the same region of memory simultaneously, the effect * If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds. * may be that only one operation succeeds.
*/ */
static __inline__ void __change_bit(int nr, volatile void * addr) static inline void __change_bit(int nr, volatile void * addr)
{ {
__u32 mask; __u32 mask;
volatile __u32 *a = addr; volatile __u32 *a = addr;
...@@ -171,24 +170,25 @@ static __inline__ void __change_bit(int nr, volatile void * addr) ...@@ -171,24 +170,25 @@ static __inline__ void __change_bit(int nr, volatile void * addr)
* Note that @nr may be almost arbitrarily large; this function is not * Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity. * restricted to acting on a single-word quantity.
*/ */
static __inline__ void change_bit(int nr, volatile void * addr) static inline void change_bit(int nr, volatile void * addr)
{ {
__u32 mask; __u32 mask;
volatile __u32 *a = addr; volatile __u32 *a = addr;
unsigned long flags; unsigned long flags;
unsigned long tmp;
a += (nr >> 5); a += (nr >> 5);
mask = (1 << (nr & 0x1F)); mask = (1 << (nr & 0x1F));
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%1")
LOAD" r4, @%0; \n\t" LOAD" %0, @%1; \n\t"
"xor r4, %1; \n\t" "xor %0, %2; \n\t"
STORE" r4, @%0; \n\t" STORE" %0, @%1; \n\t"
: /* no outputs */ : "=&r" (tmp)
: "r" (a), "r" (mask) : "r" (a), "r" (mask)
: "memory", "r4" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
...@@ -204,28 +204,30 @@ static __inline__ void change_bit(int nr, volatile void * addr) ...@@ -204,28 +204,30 @@ static __inline__ void change_bit(int nr, volatile void * addr)
* This operation is atomic and cannot be reordered. * This operation is atomic and cannot be reordered.
* It also implies a memory barrier. * It also implies a memory barrier.
*/ */
static __inline__ int test_and_set_bit(int nr, volatile void * addr) static inline int test_and_set_bit(int nr, volatile void * addr)
{ {
__u32 mask, oldbit; __u32 mask, oldbit;
volatile __u32 *a = addr; volatile __u32 *a = addr;
unsigned long flags; unsigned long flags;
unsigned long tmp;
a += (nr >> 5); a += (nr >> 5);
mask = (1 << (nr & 0x1F)); mask = (1 << (nr & 0x1F));
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
DCACHE_CLEAR("%0", "r4", "%1") DCACHE_CLEAR("%0", "%1", "%2")
LOAD" %0, @%1; \n\t" LOAD" %0, @%2; \n\t"
"mv r4, %0; \n\t" "mv %1, %0; \n\t"
"and %0, %2; \n\t" "and %0, %3; \n\t"
"or r4, %2; \n\t" "or %1, %3; \n\t"
STORE" r4, @%1; \n\t" STORE" %1, @%2; \n\t"
: "=&r" (oldbit) : "=&r" (oldbit), "=&r" (tmp)
: "r" (a), "r" (mask) : "r" (a), "r" (mask)
: "memory", "r4" : "memory"
); );
local_irq_restore(flags); local_irq_restore(flags);
return (oldbit != 0); return (oldbit != 0);
} }
...@@ -238,7 +240,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) ...@@ -238,7 +240,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
* If two examples of this operation race, one can appear to succeed * If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock. * but actually fail. You must protect multiple accesses with a lock.
*/ */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr) static inline int __test_and_set_bit(int nr, volatile void * addr)
{ {
__u32 mask, oldbit; __u32 mask, oldbit;
volatile __u32 *a = addr; volatile __u32 *a = addr;
...@@ -259,11 +261,12 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr) ...@@ -259,11 +261,12 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
* This operation is atomic and cannot be reordered. * This operation is atomic and cannot be reordered.
* It also implies a memory barrier. * It also implies a memory barrier.
*/ */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr) static inline int test_and_clear_bit(int nr, volatile void * addr)
{ {
__u32 mask, oldbit; __u32 mask, oldbit;
volatile __u32 *a = addr; volatile __u32 *a = addr;
unsigned long flags; unsigned long flags;
unsigned long tmp;
a += (nr >> 5); a += (nr >> 5);
mask = (1 << (nr & 0x1F)); mask = (1 << (nr & 0x1F));
...@@ -271,16 +274,16 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) ...@@ -271,16 +274,16 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
DCACHE_CLEAR("%0", "r4", "%2") DCACHE_CLEAR("%0", "%1", "%3")
LOAD" %0, @%2; \n\t" LOAD" %0, @%3; \n\t"
"mv r4, %0; \n\t" "mv %1, %0; \n\t"
"and %0, %1; \n\t" "and %0, %2; \n\t"
"not %1, %1; \n\t" "not %2, %2; \n\t"
"and r4, %1; \n\t" "and %1, %2; \n\t"
STORE" r4, @%2; \n\t" STORE" %1, @%3; \n\t"
: "=&r" (oldbit), "+r" (mask) : "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
: "r" (a) : "r" (a)
: "memory", "r4" : "memory"
); );
local_irq_restore(flags); local_irq_restore(flags);
...@@ -296,7 +299,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) ...@@ -296,7 +299,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
* If two examples of this operation race, one can appear to succeed * If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock. * but actually fail. You must protect multiple accesses with a lock.
*/ */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) static inline int __test_and_clear_bit(int nr, volatile void * addr)
{ {
__u32 mask, oldbit; __u32 mask, oldbit;
volatile __u32 *a = addr; volatile __u32 *a = addr;
...@@ -310,7 +313,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) ...@@ -310,7 +313,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
} }
/* WARNING: non atomic and it can be reordered! */ /* WARNING: non atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr) static inline int __test_and_change_bit(int nr, volatile void * addr)
{ {
__u32 mask, oldbit; __u32 mask, oldbit;
volatile __u32 *a = addr; volatile __u32 *a = addr;
...@@ -331,28 +334,30 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr) ...@@ -331,28 +334,30 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
* This operation is atomic and cannot be reordered. * This operation is atomic and cannot be reordered.
* It also implies a memory barrier. * It also implies a memory barrier.
*/ */
static __inline__ int test_and_change_bit(int nr, volatile void * addr) static inline int test_and_change_bit(int nr, volatile void * addr)
{ {
__u32 mask, oldbit; __u32 mask, oldbit;
volatile __u32 *a = addr; volatile __u32 *a = addr;
unsigned long flags; unsigned long flags;
unsigned long tmp;
a += (nr >> 5); a += (nr >> 5);
mask = (1 << (nr & 0x1F)); mask = (1 << (nr & 0x1F));
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
DCACHE_CLEAR("%0", "r4", "%1") DCACHE_CLEAR("%0", "%1", "%2")
LOAD" %0, @%1; \n\t" LOAD" %0, @%2; \n\t"
"mv r4, %0; \n\t" "mv %1, %0; \n\t"
"and %0, %2; \n\t" "and %0, %3; \n\t"
"xor r4, %2; \n\t" "xor %1, %3; \n\t"
STORE" r4, @%1; \n\t" STORE" %1, @%2; \n\t"
: "=&r" (oldbit) : "=&r" (oldbit), "=&r" (tmp)
: "r" (a), "r" (mask) : "r" (a), "r" (mask)
: "memory", "r4" : "memory"
); );
local_irq_restore(flags); local_irq_restore(flags);
return (oldbit != 0); return (oldbit != 0);
} }
...@@ -365,7 +370,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) ...@@ -365,7 +370,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
static int test_bit(int nr, const volatile void * addr); static int test_bit(int nr, const volatile void * addr);
#endif #endif
static __inline__ int test_bit(int nr, const volatile void * addr) static inline int test_bit(int nr, const volatile void * addr)
{ {
__u32 mask; __u32 mask;
const volatile __u32 *a = addr; const volatile __u32 *a = addr;
...@@ -382,7 +387,7 @@ static __inline__ int test_bit(int nr, const volatile void * addr) ...@@ -382,7 +387,7 @@ static __inline__ int test_bit(int nr, const volatile void * addr)
* *
* Undefined if no zero exists, so code should check against ~0UL first. * Undefined if no zero exists, so code should check against ~0UL first.
*/ */
static __inline__ unsigned long ffz(unsigned long word) static inline unsigned long ffz(unsigned long word)
{ {
int k; int k;
...@@ -415,7 +420,7 @@ static __inline__ unsigned long ffz(unsigned long word) ...@@ -415,7 +420,7 @@ static __inline__ unsigned long ffz(unsigned long word)
* @offset: The bitnumber to start searching at * @offset: The bitnumber to start searching at
* @size: The maximum size to search * @size: The maximum size to search
*/ */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset) static inline int find_next_zero_bit(void *addr, int size, int offset)
{ {
unsigned long *p = ((unsigned long *) addr) + (offset >> 5); unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL; unsigned long result = offset & ~31UL;
...@@ -457,7 +462,7 @@ static __inline__ int find_next_zero_bit(void *addr, int size, int offset) ...@@ -457,7 +462,7 @@ static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
* *
* Undefined if no bit exists, so code should check against 0 first. * Undefined if no bit exists, so code should check against 0 first.
*/ */
static __inline__ unsigned long __ffs(unsigned long word) static inline unsigned long __ffs(unsigned long word)
{ {
int k = 0; int k = 0;
...@@ -483,7 +488,7 @@ static __inline__ unsigned long __ffs(unsigned long word) ...@@ -483,7 +488,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
* unlikely to be set. It's guaranteed that at least one of the 140 * unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared. * bits is cleared.
*/ */
static __inline__ int sched_find_first_bit(unsigned long *b) static inline int sched_find_first_bit(unsigned long *b)
{ {
if (unlikely(b[0])) if (unlikely(b[0]))
return __ffs(b[0]); return __ffs(b[0]);
...@@ -502,7 +507,7 @@ static __inline__ int sched_find_first_bit(unsigned long *b) ...@@ -502,7 +507,7 @@ static __inline__ int sched_find_first_bit(unsigned long *b)
* @offset: The bitnumber to start searching at * @offset: The bitnumber to start searching at
* @size: The maximum size to search * @size: The maximum size to search
*/ */
static __inline__ unsigned long find_next_bit(const unsigned long *addr, static inline unsigned long find_next_bit(const unsigned long *addr,
unsigned long size, unsigned long offset) unsigned long size, unsigned long offset)
{ {
unsigned int *p = ((unsigned int *) addr) + (offset >> 5); unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
...@@ -589,7 +594,7 @@ static __inline__ unsigned long find_next_bit(const unsigned long *addr, ...@@ -589,7 +594,7 @@ static __inline__ unsigned long find_next_bit(const unsigned long *addr,
#define ext2_find_first_zero_bit find_first_zero_bit #define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit #define ext2_find_next_zero_bit find_next_zero_bit
#else #else
static __inline__ int ext2_set_bit(int nr, volatile void * addr) static inline int ext2_set_bit(int nr, volatile void * addr)
{ {
__u8 mask, oldbit; __u8 mask, oldbit;
volatile __u8 *a = addr; volatile __u8 *a = addr;
...@@ -602,7 +607,7 @@ static __inline__ int ext2_set_bit(int nr, volatile void * addr) ...@@ -602,7 +607,7 @@ static __inline__ int ext2_set_bit(int nr, volatile void * addr)
return (oldbit != 0); return (oldbit != 0);
} }
static __inline__ int ext2_clear_bit(int nr, volatile void * addr) static inline int ext2_clear_bit(int nr, volatile void * addr)
{ {
__u8 mask, oldbit; __u8 mask, oldbit;
volatile __u8 *a = addr; volatile __u8 *a = addr;
...@@ -615,7 +620,7 @@ static __inline__ int ext2_clear_bit(int nr, volatile void * addr) ...@@ -615,7 +620,7 @@ static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
return (oldbit != 0); return (oldbit != 0);
} }
static __inline__ int ext2_test_bit(int nr, const volatile void * addr) static inline int ext2_test_bit(int nr, const volatile void * addr)
{ {
__u32 mask; __u32 mask;
const volatile __u8 *a = addr; const volatile __u8 *a = addr;
...@@ -629,7 +634,7 @@ static __inline__ int ext2_test_bit(int nr, const volatile void * addr) ...@@ -629,7 +634,7 @@ static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
#define ext2_find_first_zero_bit(addr, size) \ #define ext2_find_first_zero_bit(addr, size) \
ext2_find_next_zero_bit((addr), (size), 0) ext2_find_next_zero_bit((addr), (size), 0)
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, static inline unsigned long ext2_find_next_zero_bit(void *addr,
unsigned long size, unsigned long offset) unsigned long size, unsigned long offset)
{ {
unsigned long *p = ((unsigned long *) addr) + (offset >> 5); unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
...@@ -709,4 +714,3 @@ static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, ...@@ -709,4 +714,3 @@ static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_M32R_BITOPS_H */ #endif /* _ASM_M32R_BITOPS_H */
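
A hypothetical caller (not part of this patch) may help illustrate the
bit-operation interface documented in the comments above; the bitmap name
and its size are invented for the example.

#include <linux/bitops.h>

static unsigned long pending[2];	/* room for 64 flags */

static int bitops_example(void)
{
	set_bit(5, pending);		/* atomic: safe against other CPUs */
	__set_bit(6, pending);		/* non-atomic: caller provides locking */

	/* test_and_clear_bit() returns the bit's previous value */
	return test_and_clear_bit(5, pending);
}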
#ifndef _ASM_M32R_SEMAPHORE_H #ifndef _ASM_M32R_SEMAPHORE_H
#define _ASM_M32R_SEMAPHORE_H #define _ASM_M32R_SEMAPHORE_H
/* $Id$ */
#include <linux/linkage.h> #include <linux/linkage.h>
#ifdef __KERNEL__ #ifdef __KERNEL__
...@@ -10,39 +8,15 @@ ...@@ -10,39 +8,15 @@
/* /*
* SMP- and interrupt-safe semaphores.. * SMP- and interrupt-safe semaphores..
* *
* (C) Copyright 1996 Linus Torvalds * Copyright (C) 1996 Linus Torvalds
* * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
* Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
* the original code and to make semaphore waits
* interruptible so that processes waiting on
* semaphores can be killed.
* Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
* functions in asm/sempahore-helper.h while fixing a
* potential and subtle race discovered by Ulrich Schmid
* in down_interruptible(). Since I started to play here I
* also implemented the `trylock' semaphore operation.
* 1999-07-02 Artur Skawina <skawina@geocities.com>
* Optimized "0(ecx)" -> "(ecx)" (the assembler does not
* do this). Changed calling sequences from push/jmp to
* traditional call/ret.
* Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
* Some hacks to ensure compatibility with recent
* GCC snapshots, to avoid stack corruption when compiling
* with -fomit-frame-pointer. It's not sure if this will
* be fixed in GCC, as our previous implementation was a
* bit dubious.
*
* If you would like to see an analysis of this implementation, please
* ftp to gcom.com and download the file
* /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
*
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/rwsem.h> #include <linux/rwsem.h>
#include <asm/system.h>
#include <asm/atomic.h>
#undef LOAD #undef LOAD
#undef STORE #undef STORE
...@@ -58,21 +32,14 @@ struct semaphore { ...@@ -58,21 +32,14 @@ struct semaphore {
atomic_t count; atomic_t count;
int sleepers; int sleepers;
wait_queue_head_t wait; wait_queue_head_t wait;
#ifdef WAITQUEUE_DEBUG
long __magic;
#endif
}; };
#ifdef WAITQUEUE_DEBUG #define __SEMAPHORE_INITIALIZER(name, n) \
# define __SEM_DEBUG_INIT(name) \ { \
, (int)&(name).__magic .count = ATOMIC_INIT(n), \
#else .sleepers = 0, \
# define __SEM_DEBUG_INIT(name) .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
#endif }
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
__SEM_DEBUG_INIT(name) }
#define __MUTEX_INITIALIZER(name) \ #define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name,1) __SEMAPHORE_INITIALIZER(name,1)
...@@ -83,7 +50,7 @@ struct semaphore { ...@@ -83,7 +50,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
static __inline__ void sema_init (struct semaphore *sem, int val) static inline void sema_init (struct semaphore *sem, int val)
{ {
/* /*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
...@@ -94,17 +61,14 @@ static __inline__ void sema_init (struct semaphore *sem, int val) ...@@ -94,17 +61,14 @@ static __inline__ void sema_init (struct semaphore *sem, int val)
atomic_set(&sem->count, val); atomic_set(&sem->count, val);
sem->sleepers = 0; sem->sleepers = 0;
init_waitqueue_head(&sem->wait); init_waitqueue_head(&sem->wait);
#ifdef WAITQUEUE_DEBUG
sem->__magic = (int)&sem->__magic;
#endif
} }
static __inline__ void init_MUTEX (struct semaphore *sem) static inline void init_MUTEX (struct semaphore *sem)
{ {
sema_init(sem, 1); sema_init(sem, 1);
} }
static __inline__ void init_MUTEX_LOCKED (struct semaphore *sem) static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{ {
sema_init(sem, 0); sema_init(sem, 0);
} }
...@@ -120,19 +84,15 @@ asmlinkage int __down_trylock(struct semaphore * sem); ...@@ -120,19 +84,15 @@ asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem); asmlinkage void __up(struct semaphore * sem);
/* /*
* This is ugly, but we want the default case to fall through. * Atomically decrement the semaphore's count. If it goes negative,
* "__down_failed" is a special asm handler that calls the C * block the calling thread in the TASK_UNINTERRUPTIBLE state.
* routine that actually waits. See arch/i386/kernel/semaphore.c
*/ */
static __inline__ void down(struct semaphore * sem) static inline void down(struct semaphore * sem)
{ {
unsigned long flags; unsigned long flags;
int temp; long count;
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
might_sleep();
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
"# down \n\t" "# down \n\t"
...@@ -140,7 +100,7 @@ static __inline__ void down(struct semaphore * sem) ...@@ -140,7 +100,7 @@ static __inline__ void down(struct semaphore * sem)
LOAD" %0, @%1; \n\t" LOAD" %0, @%1; \n\t"
"addi %0, #-1; \n\t" "addi %0, #-1; \n\t"
STORE" %0, @%1; \n\t" STORE" %0, @%1; \n\t"
: "=&r" (temp) : "=&r" (count)
: "r" (&sem->count) : "r" (&sem->count)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
...@@ -149,7 +109,7 @@ static __inline__ void down(struct semaphore * sem) ...@@ -149,7 +109,7 @@ static __inline__ void down(struct semaphore * sem)
); );
local_irq_restore(flags); local_irq_restore(flags);
if (temp < 0) if (unlikely(count < 0))
__down(sem); __down(sem);
} }
...@@ -157,16 +117,13 @@ static __inline__ void down(struct semaphore * sem) ...@@ -157,16 +117,13 @@ static __inline__ void down(struct semaphore * sem)
* Interruptible try to acquire a semaphore. If we obtained * Interruptible try to acquire a semaphore. If we obtained
* it, return zero. If we were interrupted, returns -EINTR * it, return zero. If we were interrupted, returns -EINTR
*/ */
static __inline__ int down_interruptible(struct semaphore * sem) static inline int down_interruptible(struct semaphore * sem)
{ {
unsigned long flags; unsigned long flags;
int temp; long count;
int result = 0; int result = 0;
#ifdef WAITQUEUE_DEBUG might_sleep();
CHECK_MAGIC(sem->__magic);
#endif
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
"# down_interruptible \n\t" "# down_interruptible \n\t"
...@@ -174,7 +131,7 @@ static __inline__ int down_interruptible(struct semaphore * sem) ...@@ -174,7 +131,7 @@ static __inline__ int down_interruptible(struct semaphore * sem)
LOAD" %0, @%1; \n\t" LOAD" %0, @%1; \n\t"
"addi %0, #-1; \n\t" "addi %0, #-1; \n\t"
STORE" %0, @%1; \n\t" STORE" %0, @%1; \n\t"
: "=&r" (temp) : "=&r" (count)
: "r" (&sem->count) : "r" (&sem->count)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
...@@ -183,7 +140,7 @@ static __inline__ int down_interruptible(struct semaphore * sem) ...@@ -183,7 +140,7 @@ static __inline__ int down_interruptible(struct semaphore * sem)
); );
local_irq_restore(flags); local_irq_restore(flags);
if (temp < 0) if (unlikely(count < 0))
result = __down_interruptible(sem); result = __down_interruptible(sem);
return result; return result;
...@@ -193,16 +150,12 @@ static __inline__ int down_interruptible(struct semaphore * sem) ...@@ -193,16 +150,12 @@ static __inline__ int down_interruptible(struct semaphore * sem)
* Non-blockingly attempt to down() a semaphore. * Non-blockingly attempt to down() a semaphore.
* Returns zero if we acquired it * Returns zero if we acquired it
*/ */
static __inline__ int down_trylock(struct semaphore * sem) static inline int down_trylock(struct semaphore * sem)
{ {
unsigned long flags; unsigned long flags;
int temp; long count;
int result = 0; int result = 0;
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
"# down_trylock \n\t" "# down_trylock \n\t"
...@@ -210,7 +163,7 @@ static __inline__ int down_trylock(struct semaphore * sem) ...@@ -210,7 +163,7 @@ static __inline__ int down_trylock(struct semaphore * sem)
LOAD" %0, @%1; \n\t" LOAD" %0, @%1; \n\t"
"addi %0, #-1; \n\t" "addi %0, #-1; \n\t"
STORE" %0, @%1; \n\t" STORE" %0, @%1; \n\t"
: "=&r" (temp) : "=&r" (count)
: "r" (&sem->count) : "r" (&sem->count)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
...@@ -219,7 +172,7 @@ static __inline__ int down_trylock(struct semaphore * sem) ...@@ -219,7 +172,7 @@ static __inline__ int down_trylock(struct semaphore * sem)
); );
local_irq_restore(flags); local_irq_restore(flags);
if (temp < 0) if (unlikely(count < 0))
result = __down_trylock(sem); result = __down_trylock(sem);
return result; return result;
...@@ -231,14 +184,10 @@ static __inline__ int down_trylock(struct semaphore * sem) ...@@ -231,14 +184,10 @@ static __inline__ int down_trylock(struct semaphore * sem)
* The default case (no contention) will result in NO * The default case (no contention) will result in NO
* jumps for both down() and up(). * jumps for both down() and up().
*/ */
static __inline__ void up(struct semaphore * sem) static inline void up(struct semaphore * sem)
{ {
unsigned long flags; unsigned long flags;
int temp; long count;
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
local_irq_save(flags); local_irq_save(flags);
__asm__ __volatile__ ( __asm__ __volatile__ (
...@@ -247,7 +196,7 @@ static __inline__ void up(struct semaphore * sem) ...@@ -247,7 +196,7 @@ static __inline__ void up(struct semaphore * sem)
LOAD" %0, @%1; \n\t" LOAD" %0, @%1; \n\t"
"addi %0, #1; \n\t" "addi %0, #1; \n\t"
STORE" %0, @%1; \n\t" STORE" %0, @%1; \n\t"
: "=&r" (temp) : "=&r" (count)
: "r" (&sem->count) : "r" (&sem->count)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
...@@ -256,11 +205,10 @@ static __inline__ void up(struct semaphore * sem) ...@@ -256,11 +205,10 @@ static __inline__ void up(struct semaphore * sem)
); );
local_irq_restore(flags); local_irq_restore(flags);
if (temp <= 0) if (unlikely(count <= 0))
__up(sem); __up(sem);
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_M32R_SEMAPHORE_H */ #endif /* _ASM_M32R_SEMAPHORE_H */
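
A hypothetical usage sketch (not part of this patch) of the semaphore
interface above, with an invented mutex name and critical section:

#include <linux/errno.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(config_sem);	/* count initialised to 1 */

static int update_config(void)
{
	if (down_interruptible(&config_sem))
		return -EINTR;		/* interrupted while sleeping */

	/* ... critical section: count is now 0 ... */

	up(&config_sem);		/* release; wakes one waiter if any */
	return 0;
}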
#ifndef _ASM_M32R_SPINLOCK_H #ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H #define _ASM_M32R_SPINLOCK_H
/* $Id$ */
/* /*
* linux/include/asm-m32r/spinlock.h * linux/include/asm-m32r/spinlock.h
* orig : i386 2.4.10
* *
* M32R version: * M32R version:
* Copyright (C) 2001, 2002 Hitoshi Yamamoto * Copyright (C) 2001, 2002 Hitoshi Yamamoto
* Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
*/ */
#include <linux/config.h> /* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */ #include <linux/config.h> /* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
...@@ -41,6 +39,9 @@ typedef struct { ...@@ -41,6 +39,9 @@ typedef struct {
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
unsigned magic; unsigned magic;
#endif #endif
#ifdef CONFIG_PREEMPT
unsigned int break_lock;
#endif
} spinlock_t; } spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead #define SPINLOCK_MAGIC 0xdead4ead
...@@ -66,22 +67,17 @@ typedef struct { ...@@ -66,22 +67,17 @@ typedef struct {
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x)) #define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
/* /**
* This works. Despite all the confusion. * _raw_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
*
* _raw_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/ */
static inline int _raw_spin_trylock(spinlock_t *lock)
/*======================================================================*
* Try spin lock
*======================================================================*
* Argument:
* arg0: lock
* Return value:
* =1: Success
* =0: Failure
*======================================================================*/
static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{ {
int oldval; int oldval;
unsigned long tmp1, tmp2;
/* /*
* lock->lock : =1 : unlock * lock->lock : =1 : unlock
...@@ -93,16 +89,16 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock) ...@@ -93,16 +89,16 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock)
*/ */
__asm__ __volatile__ ( __asm__ __volatile__ (
"# spin_trylock \n\t" "# spin_trylock \n\t"
"ldi r4, #0; \n\t" "ldi %1, #0; \n\t"
"mvfc r5, psw; \n\t" "mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("%0", "r6", "%1") DCACHE_CLEAR("%0", "r6", "%3")
"lock %0, @%1; \n\t" "lock %0, @%3; \n\t"
"unlock r4, @%1; \n\t" "unlock %1, @%3; \n\t"
"mvtc r5, psw; \n\t" "mvtc %2, psw; \n\t"
: "=&r" (oldval) : "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
: "r" (&lock->lock) : "r" (&lock->lock)
: "memory", "r4", "r5" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
...@@ -111,8 +107,10 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock) ...@@ -111,8 +107,10 @@ static __inline__ int _raw_spin_trylock(spinlock_t *lock)
return (oldval > 0); return (oldval > 0);
} }
static __inline__ void _raw_spin_lock(spinlock_t *lock) static inline void _raw_spin_lock(spinlock_t *lock)
{ {
unsigned long tmp0, tmp1;
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
__label__ here; __label__ here;
here: here:
...@@ -135,31 +133,31 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock) ...@@ -135,31 +133,31 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock)
"# spin_lock \n\t" "# spin_lock \n\t"
".fillinsn \n" ".fillinsn \n"
"1: \n\t" "1: \n\t"
"mvfc r5, psw; \n\t" "mvfc %1, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%2")
"lock r4, @%0; \n\t" "lock %0, @%2; \n\t"
"addi r4, #-1; \n\t" "addi %0, #-1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%2; \n\t"
"mvtc r5, psw; \n\t" "mvtc %1, psw; \n\t"
"bltz r4, 2f; \n\t" "bltz %0, 2f; \n\t"
LOCK_SECTION_START(".balign 4 \n\t") LOCK_SECTION_START(".balign 4 \n\t")
".fillinsn \n" ".fillinsn \n"
"2: \n\t" "2: \n\t"
"ld r4, @%0; \n\t" "ld %0, @%2; \n\t"
"bgtz r4, 1b; \n\t" "bgtz %0, 1b; \n\t"
"bra 2b; \n\t" "bra 2b; \n\t"
LOCK_SECTION_END LOCK_SECTION_END
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1)
: "r" (&lock->lock) : "r" (&lock->lock)
: "memory", "r4", "r5" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ void _raw_spin_unlock(spinlock_t *lock) static inline void _raw_spin_unlock(spinlock_t *lock)
{ {
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
BUG_ON(lock->magic != SPINLOCK_MAGIC); BUG_ON(lock->magic != SPINLOCK_MAGIC);
...@@ -184,6 +182,9 @@ typedef struct { ...@@ -184,6 +182,9 @@ typedef struct {
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
unsigned magic; unsigned magic;
#endif #endif
#ifdef CONFIG_PREEMPT
unsigned int break_lock;
#endif
} rwlock_t; } rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed #define RWLOCK_MAGIC 0xdeaf1eed
...@@ -211,8 +212,10 @@ typedef struct { ...@@ -211,8 +212,10 @@ typedef struct {
*/ */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */ /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
static __inline__ void _raw_read_lock(rwlock_t *rw) static inline void _raw_read_lock(rwlock_t *rw)
{ {
unsigned long tmp0, tmp1;
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
BUG_ON(rw->magic != RWLOCK_MAGIC); BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif #endif
...@@ -231,40 +234,42 @@ static __inline__ void _raw_read_lock(rwlock_t *rw) ...@@ -231,40 +234,42 @@ static __inline__ void _raw_read_lock(rwlock_t *rw)
"# read_lock \n\t" "# read_lock \n\t"
".fillinsn \n" ".fillinsn \n"
"1: \n\t" "1: \n\t"
"mvfc r5, psw; \n\t" "mvfc %1, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%2")
"lock r4, @%0; \n\t" "lock %0, @%2; \n\t"
"addi r4, #-1; \n\t" "addi %0, #-1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%2; \n\t"
"mvtc r5, psw; \n\t" "mvtc %1, psw; \n\t"
"bltz r4, 2f; \n\t" "bltz %0, 2f; \n\t"
LOCK_SECTION_START(".balign 4 \n\t") LOCK_SECTION_START(".balign 4 \n\t")
".fillinsn \n" ".fillinsn \n"
"2: \n\t" "2: \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%2")
"lock r4, @%0; \n\t" "lock %0, @%2; \n\t"
"addi r4, #1; \n\t" "addi %0, #1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%2; \n\t"
"mvtc r5, psw; \n\t" "mvtc %1, psw; \n\t"
".fillinsn \n" ".fillinsn \n"
"3: \n\t" "3: \n\t"
"ld r4, @%0; \n\t" "ld %0, @%2; \n\t"
"bgtz r4, 1b; \n\t" "bgtz %0, 1b; \n\t"
"bra 3b; \n\t" "bra 3b; \n\t"
LOCK_SECTION_END LOCK_SECTION_END
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1)
: "r" (&rw->lock) : "r" (&rw->lock)
: "memory", "r4", "r5" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ void _raw_write_lock(rwlock_t *rw) static inline void _raw_write_lock(rwlock_t *rw)
{ {
unsigned long tmp0, tmp1, tmp2;
#if SPINLOCK_DEBUG #if SPINLOCK_DEBUG
BUG_ON(rw->magic != RWLOCK_MAGIC); BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif #endif
...@@ -281,85 +286,91 @@ static __inline__ void _raw_write_lock(rwlock_t *rw) ...@@ -281,85 +286,91 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
*/ */
__asm__ __volatile__ ( __asm__ __volatile__ (
"# write_lock \n\t" "# write_lock \n\t"
"seth r5, #high(" RW_LOCK_BIAS_STR "); \n\t" "seth %1, #high(" RW_LOCK_BIAS_STR "); \n\t"
"or3 r5, r5, #low(" RW_LOCK_BIAS_STR "); \n\t" "or3 %1, %1, #low(" RW_LOCK_BIAS_STR "); \n\t"
".fillinsn \n" ".fillinsn \n"
"1: \n\t" "1: \n\t"
"mvfc r6, psw; \n\t" "mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r7", "%0") DCACHE_CLEAR("%0", "r7", "%3")
"lock r4, @%0; \n\t" "lock %0, @%3; \n\t"
"sub r4, r5; \n\t" "sub %0, %1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%3; \n\t"
"mvtc r6, psw; \n\t" "mvtc %2, psw; \n\t"
"bnez r4, 2f; \n\t" "bnez %0, 2f; \n\t"
LOCK_SECTION_START(".balign 4 \n\t") LOCK_SECTION_START(".balign 4 \n\t")
".fillinsn \n" ".fillinsn \n"
"2: \n\t" "2: \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r7", "%0") DCACHE_CLEAR("%0", "r7", "%3")
"lock r4, @%0; \n\t" "lock %0, @%3; \n\t"
"add r4, r5; \n\t" "add %0, %1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%3; \n\t"
"mvtc r6, psw; \n\t" "mvtc %2, psw; \n\t"
".fillinsn \n" ".fillinsn \n"
"3: \n\t" "3: \n\t"
"ld r4, @%0; \n\t" "ld %0, @%3; \n\t"
"beq r4, r5, 1b; \n\t" "beq %0, %1, 1b; \n\t"
"bra 3b; \n\t" "bra 3b; \n\t"
LOCK_SECTION_END LOCK_SECTION_END
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
: "r" (&rw->lock) : "r" (&rw->lock)
: "memory", "r4", "r5", "r6" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r7" , "r7"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ void _raw_read_unlock(rwlock_t *rw) static inline void _raw_read_unlock(rwlock_t *rw)
{ {
unsigned long tmp0, tmp1;
__asm__ __volatile__ ( __asm__ __volatile__ (
"# read_unlock \n\t" "# read_unlock \n\t"
"mvfc r5, psw; \n\t" "mvfc %1, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r6", "%0") DCACHE_CLEAR("%0", "r6", "%2")
"lock r4, @%0; \n\t" "lock %0, @%2; \n\t"
"addi r4, #1; \n\t" "addi %0, #1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%2; \n\t"
"mvtc r5, psw; \n\t" "mvtc %1, psw; \n\t"
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1)
: "r" (&rw->lock) : "r" (&rw->lock)
: "memory", "r4", "r5" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r6" , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ void _raw_write_unlock(rwlock_t *rw) static inline void _raw_write_unlock(rwlock_t *rw)
{ {
unsigned long tmp0, tmp1, tmp2;
__asm__ __volatile__ ( __asm__ __volatile__ (
"# write_unlock \n\t" "# write_unlock \n\t"
"seth r5, #high(" RW_LOCK_BIAS_STR "); \n\t" "seth %1, #high(" RW_LOCK_BIAS_STR "); \n\t"
"or3 r5, r5, #low(" RW_LOCK_BIAS_STR "); \n\t" "or3 %1, %1, #low(" RW_LOCK_BIAS_STR "); \n\t"
"mvfc r6, psw; \n\t" "mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t" "clrpsw #0x40 -> nop; \n\t"
DCACHE_CLEAR("r4", "r7", "%0") DCACHE_CLEAR("%0", "r7", "%3")
"lock r4, @%0; \n\t" "lock %0, @%3; \n\t"
"add r4, r5; \n\t" "add %0, %1; \n\t"
"unlock r4, @%0; \n\t" "unlock %0, @%3; \n\t"
"mvtc r6, psw; \n\t" "mvtc %2, psw; \n\t"
: /* no outputs */ : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
: "r" (&rw->lock) : "r" (&rw->lock)
: "memory", "r4", "r5", "r6" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r7" , "r7"
#endif /* CONFIG_CHIP_M32700_TS1 */ #endif /* CONFIG_CHIP_M32700_TS1 */
); );
} }
static __inline__ int _raw_write_trylock(rwlock_t *lock) #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
static inline int _raw_write_trylock(rwlock_t *lock)
{ {
atomic_t *count = (atomic_t *)lock; atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count)) if (atomic_sub_and_test(RW_LOCK_BIAS, count))
......