nexedi / linux · Commits

Commit de1c1419
authored Jan 28, 2008 by Jesper Nilsson
CRIS v32: Completely rework spinlocks for ETRAX FS and ARTPEC-3

parent 4258fb19
Showing 1 changed file with 67 additions and 105 deletions

include/asm-cris/arch-v32/spinlock.h  (+67, -105)
 #ifndef __ASM_ARCH_SPINLOCK_H
 #define __ASM_ARCH_SPINLOCK_H
 
-#include <asm/system.h>
+#include <linux/spinlock_types.h>
 
 #define RW_LOCK_BIAS 0x01000000
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
 
 extern void cris_spin_unlock(void *l, int val);
 extern void cris_spin_lock(void *l);
 extern int cris_spin_trylock(void *l);
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+        return *(volatile signed char *)(&(x)->slock) <= 0;
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
         __asm__ volatile ("move.d %1,%0" \
-                          : "=m" (lock->lock) \
+                          : "=m" (lock->slock) \
                           : "r" (1) \
                           : "memory");
 }
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+        while (__raw_spin_is_locked(lock))
+                cpu_relax();
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-        return cris_spin_trylock((void *)&lock->lock);
+        return cris_spin_trylock((void *)&lock->slock);
 }
 
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-        cris_spin_lock((void *)&lock->lock);
+        cris_spin_lock((void *)&lock->slock);
 }
 
-static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-        _raw_spin_lock(lock);
+        __raw_spin_lock(lock);
 }
 
 /*
...
@@ -46,120 +53,75 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
  * can "mix" irq-safe locks - any writer needs to get a
  * irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
+ *
  */
 
-typedef struct {
-        spinlock_t lock;
-        volatile int counter;
-#ifdef CONFIG_PREEMPT
-        unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { {1}, 0 }
-
-#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
-
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define read_can_lock(x) ((int)(x)->counter >= 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define write_can_lock(x) ((x)->counter == 0)
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
-
-/* read_lock, read_unlock are pretty straightforward.  Of course it somehow
- * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
-
-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
 {
-        unsigned long flags;
-        local_irq_save(flags);
-        _raw_spin_lock(&rw->lock);
-
-        rw->counter++;
-
-        _raw_spin_unlock(&rw->lock);
-        local_irq_restore(flags);
+        return (int)(x)->lock > 0;
 }
 
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
 {
-        unsigned long flags;
-        local_irq_save(flags);
-        _raw_spin_lock(&rw->lock);
-
-        rw->counter--;
-
-        _raw_spin_unlock(&rw->lock);
-        local_irq_restore(flags);
+        return (x)->lock == RW_LOCK_BIAS;
 }
 
-/* write_lock is less trivial.  We optimistically grab the lock and check
- * if we surprised any readers.  If so we release the lock and wait till
- * they're all gone before trying again
- *
- * Also note that we don't use the _irqsave / _irqrestore suffixes here.
- * If we're called with interrupts enabled and we've got readers (or other
- * writers) in interrupt handlers someone fucked up and we'd dead-lock
- * sooner or later anyway.   prumpf */
-
-static __inline__ void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-retry:
-        _raw_spin_lock(&rw->lock);
-
-        if (rw->counter != 0) {
-                /* this basically never happens */
-                _raw_spin_unlock(&rw->lock);
-
-                while (rw->counter != 0);
-
-                goto retry;
-        }
-
-        /* got it.  now leave without unlocking */
-        rw->counter = -1; /* remember we are locked */
+        __raw_spin_lock(&rw->slock);
+        while (rw->lock == 0);
+        rw->lock--;
+        __raw_spin_unlock(&rw->slock);
 }
 
-/* write_unlock is absolutely trivial - we don't have to wait for anything */
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-        rw->counter = 0;
-        _raw_spin_unlock(&rw->lock);
+        __raw_spin_lock(&rw->slock);
+        while (rw->lock != RW_LOCK_BIAS);
+        rw->lock == 0;
+        __raw_spin_unlock(&rw->slock);
 }
 
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-        _raw_spin_lock(&rw->lock);
-        if (rw->counter != 0) {
-                /* this basically never happens */
-                _raw_spin_unlock(&rw->lock);
-                return 0;
-        }
-
-        /* got it.  now leave without unlocking */
-        rw->counter = -1; /* remember we are locked */
-        return 1;
+        __raw_spin_lock(&rw->slock);
+        rw->lock++;
+        __raw_spin_unlock(&rw->slock);
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+        __raw_spin_lock(&rw->slock);
+        while (rw->lock != RW_LOCK_BIAS);
+        rw->lock == RW_LOCK_BIAS;
+        __raw_spin_unlock(&rw->slock);
 }
 
-static __inline__ int is_read_locked(rwlock_t *rw)
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
-        return rw->counter > 0;
+        int ret = 0;
+        __raw_spin_lock(&rw->slock);
+        if (rw->lock != 0) {
+                rw->lock--;
+                ret = 1;
+        }
+        __raw_spin_unlock(&rw->slock);
+        return ret;
 }
 
-static __inline__ int is_write_locked(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
-        return rw->counter < 0;
+        int ret = 0;
+        __raw_spin_lock(&rw->slock);
+        if (rw->lock == RW_LOCK_BIAS) {
+                rw->lock == 0;
+                ret = 1;
+        }
+        __raw_spin_unlock(&rw->slock);
+        return 1;
 }
 
 #define _raw_spin_relax(lock) cpu_relax()
 #define _raw_read_relax(lock) cpu_relax()
 #define _raw_write_relax(lock) cpu_relax()
...
...
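The new rwlock scheme in this diff replaces the old IRQ-disabling reader counter with a single lock counter that starts at RW_LOCK_BIAS and is only ever touched while holding the per-lock slock spinlock: each reader decrements it by one, and a writer takes it all the way to zero. Note that, as committed, the statements rw->lock == 0; and rw->lock == RW_LOCK_BIAS; in the new write paths are comparisons rather than assignments, and __raw_write_trylock() returns 1 unconditionally. Below is a minimal, hypothetical user-space sketch of the same bias-counting idea, with a pthread_mutex_t standing in for cris_spin_lock()/cris_spin_unlock(), plain assignments where the diff uses ==, and illustrative demo_* names that are not part of the kernel code.

#include <stdio.h>
#include <pthread.h>

#define RW_LOCK_BIAS 0x01000000

struct demo_rwlock {
        pthread_mutex_t slock;  /* stand-in for the CRIS arch spinlock */
        int lock;               /* biased counter, starts at RW_LOCK_BIAS */
};

static void demo_read_lock(struct demo_rwlock *rw)
{
        for (;;) {
                pthread_mutex_lock(&rw->slock);
                if (rw->lock != 0) {            /* no writer holds it */
                        rw->lock--;             /* claim one reader slot */
                        pthread_mutex_unlock(&rw->slock);
                        return;
                }
                pthread_mutex_unlock(&rw->slock);  /* writer active, retry */
        }
}

static void demo_read_unlock(struct demo_rwlock *rw)
{
        pthread_mutex_lock(&rw->slock);
        rw->lock++;                             /* give the slot back */
        pthread_mutex_unlock(&rw->slock);
}

static void demo_write_lock(struct demo_rwlock *rw)
{
        for (;;) {
                pthread_mutex_lock(&rw->slock);
                if (rw->lock == RW_LOCK_BIAS) { /* no readers, no writer */
                        rw->lock = 0;           /* assignment, not '==' */
                        pthread_mutex_unlock(&rw->slock);
                        return;
                }
                pthread_mutex_unlock(&rw->slock);
        }
}

static void demo_write_unlock(struct demo_rwlock *rw)
{
        pthread_mutex_lock(&rw->slock);
        rw->lock = RW_LOCK_BIAS;                /* restore the full bias */
        pthread_mutex_unlock(&rw->slock);
}

int main(void)
{
        struct demo_rwlock rw = { PTHREAD_MUTEX_INITIALIZER, RW_LOCK_BIAS };

        demo_read_lock(&rw);
        printf("reader in: counter = 0x%08x\n", rw.lock);  /* RW_LOCK_BIAS - 1 */
        demo_read_unlock(&rw);

        demo_write_lock(&rw);
        printf("writer in: counter = 0x%08x\n", rw.lock);  /* 0 */
        demo_write_unlock(&rw);
        return 0;
}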