Commit 61887e47, authored Jul 28, 2002 by Linus Torvalds

Merge bk://bkbits.ras.ucalgary.ca/rgooch-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: 47fff65a 39520ba4

5 changed files, with 154 additions(+) and 15 deletions(-):

    arch/i386/kernel/process.c      +8   -8
    include/asm-i386/rwsem.h        +36  -0
    include/linux/rwsem-spinlock.h  +3   -0
    include/linux/rwsem.h           +24  -1
    lib/rwsem-spinlock.c            +83  -6
arch/i386/kernel/process.c

@@ -674,6 +674,14 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	tss->esp0 = next->esp0;
 
+	/*
+	 * Load the per-thread Thread-Local Storage descriptor.
+	 *
+	 * NOTE: it's faster to do the two stores unconditionally
+	 * than to branch away.
+	 */
+	load_TLS_desc(next, cpu);
+
 	/*
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
@@ -689,14 +697,6 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		loadsegment(gs, next->gs);
 	}
 
-	/*
-	 * Load the per-thread Thread-Local Storage descriptor.
-	 *
-	 * NOTE: it's faster to do the two stores unconditionally
-	 * than to branch away.
-	 */
-	load_TLS_desc(next, cpu);
-
 	/*
 	 * Now maybe reload the debug registers
 	 */
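These two hunks simply move the load_TLS_desc() call earlier in __switch_to(), so the next thread's TLS descriptor is installed before %fs/%gs are handled. For readers unfamiliar with the mechanism, a rough sketch of what such a helper amounts to — cpu_gdt_table, TLS_ENTRY, and tls_desc are illustrative assumptions here, not the actual 2.5 definitions:

/* Hypothetical sketch, not the kernel's real definition: copy the
 * thread's 8-byte TLS segment descriptor into this CPU's GDT slot. */
struct desc_struct {
	unsigned long a, b;		/* low/high words of a descriptor */
};

static inline void load_TLS_desc(struct thread_struct *next, int cpu)
{
	/* two unconditional 32-bit stores -- per the NOTE above, cheaper
	 * than branching on whether the descriptor actually changed */
	cpu_gdt_table[cpu][TLS_ENTRY].a = next->tls_desc.a;
	cpu_gdt_table[cpu][TLS_ENTRY].b = next->tls_desc.b;
}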
include/asm-i386/rwsem.h

@@ -117,6 +117,29 @@ LOCK_PREFIX "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value
 		: "memory", "cc");
 }
 
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+	__s32 result, tmp;
+	__asm__ __volatile__(
+		"# beginning __down_read_trylock\n\t"
+		"  movl      %0,%1\n\t"
+		"1:\n\t"
+		"  movl      %1,%2\n\t"
+		"  addl      %3,%2\n\t"
+		"  jle       2f\n\t"
+LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
+		"  jnz       1b\n\t"
+		"2:\n\t"
+		"# ending __down_read_trylock\n\t"
+		: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
+		: "i"(RWSEM_ACTIVE_READ_BIAS)
+		: "memory", "cc");
+	return result >= 0 ? 1 : 0;
+}
+
 /*
  * lock for writing
  */
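Read back into C, the asm above is a classic optimistic compare-and-swap loop: speculatively add the reader bias to a snapshot of count, bail out if the result would be non-positive (a writer is active or queued), and otherwise try to publish the new count with the LOCK-prefixed cmpxchgl, retrying on a race. A sketch of the equivalent logic — down_read_trylock_sketch is a hypothetical name, and GCC's __sync_val_compare_and_swap builtin stands in for the cmpxchgl instruction:

/* Illustrative C equivalent of the inline asm above; not the kernel code. */
static inline int down_read_trylock_sketch(struct rw_semaphore *sem)
{
	__s32 old = sem->count, new, seen;

	for (;;) {
		new = old + RWSEM_ACTIVE_READ_BIAS;
		if (new <= 0)
			return 0;	/* writer active or queued: contention */
		seen = __sync_val_compare_and_swap(&sem->count, old, new);
		if (seen == old)
			return 1;	/* new count published: lock acquired */
		old = seen;		/* raced with another CPU: retry */
	}
}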
@@ -144,6 +167,19 @@ LOCK_PREFIX "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
 		: "memory", "cc");
 }
 
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+	signed long ret = cmpxchg(&sem->count,
+				  RWSEM_UNLOCKED_VALUE,
+				  RWSEM_ACTIVE_WRITE_BIAS);
+	if (ret == RWSEM_UNLOCKED_VALUE)
+		return 1;
+	return 0;
+}
+
 /*
  * unlock after reading
  */
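Unlike the read side, the write-side trylock needs no retry loop: only a completely idle semaphore (count == RWSEM_UNLOCKED_VALUE) can be claimed, and any other value means readers or a writer are already in, so a single compare-and-swap either wins or reports contention. As a reminder, cmpxchg()'s contract is, in effect, the following (a sketch of the semantics only — the real thing is one atomic LOCK cmpxchg instruction):

/* Sketch of cmpxchg() semantics; illustrative, not kernel code. */
static inline long cmpxchg_sketch(volatile long *ptr, long old, long new)
{
	/* atomically: if *ptr holds 'old', store 'new'; either way,
	 * return the value that was actually found in *ptr */
	return __sync_val_compare_and_swap(ptr, old, new);
}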
include/linux/rwsem-spinlock.h

@@ -54,9 +54,12 @@ struct rw_semaphore {
 extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
 extern void FASTCALL(__down_read(struct rw_semaphore *sem));
+extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
 extern void FASTCALL(__down_write(struct rw_semaphore *sem));
+extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
 extern void FASTCALL(__up_read(struct rw_semaphore *sem));
 extern void FASTCALL(__up_write(struct rw_semaphore *sem));
+extern void FASTCALL(__downgrade_write(struct rw_semaphore *sem));
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_SPINLOCK_H */
include/linux/rwsem.h

@@ -45,6 +45,18 @@ static inline void down_read(struct rw_semaphore *sem)
 	rwsemtrace(sem,"Leaving down_read");
 }
 
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	int ret;
+	rwsemtrace(sem,"Entering down_read_trylock");
+	ret = __down_read_trylock(sem);
+	rwsemtrace(sem,"Leaving down_read_trylock");
+	return ret;
+}
+
 /*
  * lock for writing
  */
@@ -55,6 +67,18 @@ static inline void down_write(struct rw_semaphore *sem)
 	rwsemtrace(sem,"Leaving down_write");
 }
 
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	int ret;
+	rwsemtrace(sem,"Entering down_write_trylock");
+	ret = __down_write_trylock(sem);
+	rwsemtrace(sem,"Leaving down_write_trylock");
+	return ret;
+}
+
 /*
  * release a read lock
  */
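These wrappers just add rwsemtrace() bracketing around the arch-level primitives and pass the result through. Caller-side, the new API lets a path opt out instead of sleeping on contention; a minimal sketch (my_sem and reader_fast_path are illustrative names, not from this commit):

static DECLARE_RWSEM(my_sem);		/* hypothetical semaphore */

void reader_fast_path(void)
{
	if (!down_read_trylock(&my_sem))
		return;			/* contended: caller chooses a fallback */
	/* ... read-side critical section ... */
	up_read(&my_sem);
}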
@@ -85,6 +109,5 @@ static inline void downgrade_write(struct rw_semaphore *sem)
 	rwsemtrace(sem,"Leaving downgrade_write");
 }
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
lib/rwsem-spinlock.c

@@ -46,8 +46,9 @@ void init_rwsem(struct rw_semaphore *sem)
  * - the 'waiting count' is non-zero
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having flags zeroised
+ * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
 	struct rwsem_waiter *waiter;
 	int woken;
@@ -56,7 +57,14 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
-	/* try to grant a single write lock if there's a writer at the front of the queue
+	if (!wakewrite) {
+		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
+			goto out;
+		goto dont_wake_writers;
+	}
+
+	/* if we are allowed to wake writers try to grant a single write lock if there's a
+	 * writer at the front of the queue
 	 * - we leave the 'waiting count' incremented to signify potential contention
 	 */
 	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
@@ -68,16 +76,19 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
 	}
 
 	/* grant an infinite number of read locks to the readers at the front of the queue */
+ dont_wake_writers:
 	woken = 0;
-	do {
+	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
+		struct list_head *next = waiter->list.next;
+
 		list_del(&waiter->list);
 		waiter->flags = 0;
 		wake_up_process(waiter->task);
 		woken++;
 		if (list_empty(&sem->wait_list))
 			break;
-		waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
+		waiter = list_entry(next, struct rwsem_waiter, list);
+	}
 
 	sem->activity += woken;
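The do-while becomes a plain while whose condition is tested before each wake, and every iteration now saves waiter->list.next before unlinking the entry: a rwsem_waiter lives on the waiting task's stack, so once it has been taken off the list and its owner woken, it should not be dereferenced again. Generically, the pattern looks like this (walk_and_release and release_node are illustrative names):

/* Illustrative traversal pattern, not kernel code: each node may be
 * reclaimed by its owner as soon as it is processed, so grab the
 * successor pointer first. */
static void walk_and_release(struct list_head *head)
{
	struct list_head *p = head->next;

	while (p != head) {
		struct list_head *next = p->next;	/* save before releasing p */
		release_node(p);			/* p may vanish after this */
		p = next;
	}
}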
@@ -148,6 +159,28 @@ void __down_read(struct rw_semaphore *sem)
 	rwsemtrace(sem,"Leaving __down_read");
 }
 
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+int __down_read_trylock(struct rw_semaphore *sem)
+{
+	int ret = 0;
+	rwsemtrace(sem,"Entering __down_read_trylock");
+
+	spin_lock(&sem->wait_lock);
+
+	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+		/* granted */
+		sem->activity++;
+		ret = 1;
+	}
+
+	spin_unlock(&sem->wait_lock);
+
+	rwsemtrace(sem,"Leaving __down_read_trylock");
+	return ret;
+}
+
 /*
  * get a write lock on the semaphore
  * - note that we increment the waiting count anyway to indicate an exclusive lock
@@ -194,6 +227,28 @@ void __down_write(struct rw_semaphore *sem)
 	rwsemtrace(sem,"Leaving __down_write");
 }
 
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+int __down_write_trylock(struct rw_semaphore *sem)
+{
+	int ret = 0;
+	rwsemtrace(sem,"Entering __down_write_trylock");
+
+	spin_lock(&sem->wait_lock);
+
+	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+		/* granted */
+		sem->activity = -1;
+		ret = 1;
+	}
+
+	spin_unlock(&sem->wait_lock);
+
+	rwsemtrace(sem,"Leaving __down_write_trylock");
+	return ret;
+}
+
 /*
  * release a read lock on the semaphore
 */
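Note that both trylocks also require list_empty(&sem->wait_list), so a trylock never jumps ahead of queued waiters, and sem->activity keeps the same encoding throughout this file: 0 is unlocked, a positive value counts active readers, -1 marks an active writer. Stripped of the wait queue, the state machine is small enough to model in user space (a sketch only, with a pthread mutex standing in for sem->wait_lock):

#include <pthread.h>

/* Illustrative user-space model of the two trylocks above; not kernel code. */
struct model_rwsem {
	pthread_mutex_t lock;		/* stands in for sem->wait_lock */
	int activity;			/* 0 free, >0 readers, -1 writer */
};

static int model_down_read_trylock(struct model_rwsem *s)
{
	int ret = 0;
	pthread_mutex_lock(&s->lock);
	if (s->activity >= 0) {		/* no writer: join the readers */
		s->activity++;
		ret = 1;
	}
	pthread_mutex_unlock(&s->lock);
	return ret;
}

static int model_down_write_trylock(struct model_rwsem *s)
{
	int ret = 0;
	pthread_mutex_lock(&s->lock);
	if (s->activity == 0) {		/* fully idle: take it exclusively */
		s->activity = -1;
		ret = 1;
	}
	pthread_mutex_unlock(&s->lock);
	return ret;
}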
@@ -222,18 +277,40 @@ void __up_write(struct rw_semaphore *sem)
 	sem->activity = 0;
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem);
+		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock(&sem->wait_lock);
 
 	rwsemtrace(sem,"Leaving __up_write");
 }
 
+/*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+void __downgrade_write(struct rw_semaphore *sem)
+{
+	rwsemtrace(sem,"Entering __rwsem_downgrade");
+
+	spin_lock(&sem->wait_lock);
+
+	sem->activity = 1;
+	if (!list_empty(&sem->wait_list))
+		sem = __rwsem_do_wake(sem, 0);
+
+	spin_unlock(&sem->wait_lock);
+
+	rwsemtrace(sem,"Leaving __rwsem_downgrade");
+}
+
 EXPORT_SYMBOL(init_rwsem);
 EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
 EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
 #if RWSEM_DEBUG
 EXPORT_SYMBOL(rwsemtrace);
 #endif
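__downgrade_write() sets activity to 1 (the caller keeps the lock, now as a reader) and passes wakewrite = 0 to __rwsem_do_wake(), so queued readers are let in alongside the downgrading thread while a queued writer keeps sleeping until the last reader leaves. Caller-side, the new primitive supports this pattern (names illustrative):

static DECLARE_RWSEM(my_sem);		/* hypothetical semaphore */

void publish_then_read(void)
{
	down_write(&my_sem);
	/* ... update the shared structure exclusively ... */
	downgrade_write(&my_sem);	/* swap write lock for read lock, no gap */
	/* ... keep reading; other readers may now enter too ... */
	up_read(&my_sem);
}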