Commit d73794d6, authored Sep 04, 2002 by Erich Focht, committed Sep 04, 2002 by David Mosberger
[PATCH] Remove global semaphore_lock for ia64, similar to i386 change for 2.5.25
Parent: a4be0c60

Showing 1 changed file with 35 additions and 27 deletions:

arch/ia64/kernel/semaphore.c  (+35, -27)
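In short, the patch drops the file-local global semaphore_lock and instead serializes the sleeper bookkeeping on the spinlock embedded in the semaphore's own wait-queue head, using the _locked wait-queue helpers that expect that lock to already be held by the caller. A minimal sketch of the before/after locking pattern, assembled from the hunks below rather than quoted verbatim from the tree:

	/* Before: one global lock serializes the slow path of every semaphore. */
	static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

	add_wait_queue_exclusive(&sem->wait, &wait);
	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	/* ... sleeper accounting, schedule() with the lock dropped ... */
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);

	/* After: each semaphore uses the lock of its own wait-queue head, so
	 * contended semaphores no longer serialize against each other. */
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);
	sem->sleepers++;
	/* ... sleeper accounting, schedule() with the lock dropped ... */
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);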
@@ -15,8 +15,8 @@
  * test if they need to do any extra work (up needs to do something
  * only if count was negative before the increment operation.
  *
- * "sleepers" and the contention routine ordering is protected by the
- * semaphore spinlock.
+ * "sleeping" and the contention routine ordering is protected
+ * by the spinlock in the semaphore's waitqueue head.
  *
  * Note that these functions are only called when there is contention
  * on the lock, and as such all this is the "non-critical" part of the
@@ -44,40 +44,42 @@ __up (struct semaphore *sem)
 	wake_up(&sem->wait);
 }
 
-static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
 void
 __down (struct semaphore *sem)
 {
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
 
 	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
 
-	spin_lock_irq(&semaphore_lock);
 	sem->sleepers++;
 	for (;;) {
 		int sleepers = sem->sleepers;
 
 		/*
 		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
+		 * playing, because we own the spinlock in
+		 * the wait_queue_head.
 		 */
 		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
 			sem->sleepers = 0;
 			break;
 		}
 		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
 	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
 }
 
 int
@@ -86,10 +88,12 @@ __down_interruptible (struct semaphore * sem)
 	int retval = 0;
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
 
 	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
 
-	spin_lock_irq(&semaphore_lock);
 	sem->sleepers++;
 	for (;;) {
 		int sleepers = sem->sleepers;
@@ -110,25 +114,27 @@ __down_interruptible (struct semaphore * sem)
 		/*
 		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
+		 * playing, because we own the spinlock in
+		 * wait_queue_head. The "-1" is because we're
+		 * still hoping to get the semaphore.
 		 */
 		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
 			sem->sleepers = 0;
 			break;
 		}
 		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
 	}
-	spin_unlock_irq(&semaphore_lock);
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
 	return retval;
 }
@@ -142,17 +148,19 @@ __down_trylock (struct semaphore *sem)
 	unsigned long flags;
 	int sleepers;
 
-	spin_lock_irqsave(&semaphore_lock, flags);
+	spin_lock_irqsave(&sem->wait.lock, flags);
 	sleepers = sem->sleepers + 1;
 	sem->sleepers = 0;
 
 	/*
 	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
+	 * playing, because we own the spinlock in the
+	 * wait_queue_head.
 	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
+	if (!atomic_add_negative(sleepers, &sem->count)) {
+		wake_up_locked(&sem->wait);
+	}
 
-	spin_unlock_irqrestore(&semaphore_lock, flags);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return 1;
 }
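For readability, here is how __down() reads with the patch applied, assembled from the context and added lines of the hunk above (whitespace may differ slightly from the actual tree). The wait-queue lock is dropped around schedule() and re-taken afterwards, just as the old code did with the global semaphore_lock:

void
__down (struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}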