d73794d6
Commit
d73794d6
authored
Sep 04, 2002
by
Erich Focht
Committed by
David Mosberger
Sep 04, 2002
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
[PATCH] Remove global semaphore_lock for ia64, similar to i386 change for 2.5.25
parent
a4be0c60
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
35 additions
and
27 deletions
+35
-27
arch/ia64/kernel/semaphore.c
arch/ia64/kernel/semaphore.c
+35
-27
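In outline, the patch deletes the file-local spinlock semaphore_lock, which every semaphore in the system had to take in its contended slow path, and instead guards the sleeper accounting in __down(), __down_interruptible() and __down_trylock() with the spinlock already embedded in each semaphore's own wait-queue head (sem->wait.lock). Since that lock is then held across the queue manipulation as well, the wait-queue helpers move to their _locked variants: add_wait_queue_exclusive_locked(), remove_wait_queue_locked() and wake_up_locked(). A small userspace sketch of the idea follows the diff.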
arch/ia64/kernel/semaphore.c @ d73794d6
@@ -15,8 +15,8 @@
  * test if they need to do any extra work (up needs to do something
  * only if count was negative before the increment operation.
  *
- * "sleepers" and the contention routine ordering is protected by the
- * semaphore spinlock.
+ * "sleeping" and the contention routine ordering is protected
+ * by the spinlock in the semaphore's waitqueue head.
  *
  * Note that these functions are only called when there is contention
  * on the lock, and as such all this is the "non-critical" part of the
@@ -44,40 +44,42 @@ __up (struct semaphore *sem)
 	wake_up(&sem->wait);
 }
 
-static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
 void
 __down (struct semaphore *sem)
 {
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
 	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
 	sem->sleepers++;
 	for (;;) {
 		int sleepers = sem->sleepers;
 
 		/*
 		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
+		 * playing, because we own the spinlock in
+		 * the wait_queue_head.
 		 */
 		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
 			sem->sleepers = 0;
 			break;
 		}
 		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
 	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	tsk->state = TASK_RUNNING;
 }
 
 int
@@ -86,10 +88,12 @@ __down_interruptible (struct semaphore * sem)
 	int retval = 0;
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
+	unsigned long flags;
+
 	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
 	sem->sleepers++;
 	for (;;) {
 		int sleepers = sem->sleepers;
@@ -110,25 +114,27 @@ __down_interruptible (struct semaphore * sem)
 		/*
 		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
+		 * playing, because we own the spinlock in
+		 * wait_queue_head. The "-1" is because we're
+		 * still hoping to get the semaphore.
 		 */
 		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
 			sem->sleepers = 0;
 			break;
 		}
 		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
 
 		schedule();
+
+		spin_lock_irqsave(&sem->wait.lock, flags);
 		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
 	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
+	remove_wait_queue_locked(&sem->wait, &wait);
+	wake_up_locked(&sem->wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+	tsk->state = TASK_RUNNING;
 	return retval;
 }
@@ -142,17 +148,19 @@ __down_trylock (struct semaphore *sem)
 	unsigned long flags;
 	int sleepers;
 
-	spin_lock_irqsave(&semaphore_lock, flags);
+	spin_lock_irqsave(&sem->wait.lock, flags);
 	sleepers = sem->sleepers + 1;
 	sem->sleepers = 0;
 
 	/*
 	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
+	 * playing, because we own the spinlock in the
+	 * wait_queue_head.
 	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
+	if (!atomic_add_negative(sleepers, &sem->count)) {
+		wake_up_locked(&sem->wait);
+	}
 
-	spin_unlock_irqrestore(&semaphore_lock, flags);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	return 1;
 }
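As promised above, here is a minimal userspace analogue of the locking change, for illustration only: struct sema, slow_path_global() and slow_path_per_object() are hypothetical names, pthread mutexes stand in for kernel spinlocks, and the sleeper counter stands in for the state that semaphore_lock used to guard. It shows the property the patch exploits: with a per-object lock, contended paths on different semaphores no longer serialize against each other.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical userspace stand-in for the kernel's struct semaphore. */
struct sema {
	pthread_mutex_t lock;	/* per-object lock: the analogue of sem->wait.lock */
	long sleepers;		/* state guarded by that lock */
};

/* Old scheme: one process-wide lock guards every object's sleeper
 * count, so threads on unrelated objects still collide.  This is the
 * role the deleted semaphore_lock played. */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static void slow_path_global(struct sema *s)
{
	pthread_mutex_lock(&global_lock);
	s->sleepers++;
	pthread_mutex_unlock(&global_lock);
}

/* New scheme: the object's own lock guards its own sleeper count. */
static void slow_path_per_object(struct sema *s)
{
	pthread_mutex_lock(&s->lock);
	s->sleepers++;
	pthread_mutex_unlock(&s->lock);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 1000000; i++)
		slow_path_per_object(arg);
	return NULL;
}

int main(void)
{
	struct sema a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct sema b = { PTHREAD_MUTEX_INITIALIZER, 0 };
	pthread_t ta, tb;

	/* With the global lock these two calls serialize even though
	 * they touch different objects. */
	slow_path_global(&a);
	slow_path_global(&b);

	/* With per-object locks, two threads hammering two distinct
	 * objects never contend on any shared lock. */
	pthread_create(&ta, NULL, worker, &a);
	pthread_create(&tb, NULL, worker, &b);
	pthread_join(ta, NULL);
	pthread_join(tb, NULL);

	printf("a.sleepers=%ld  b.sleepers=%ld\n", a.sleepers, b.sleepers);
	return 0;	/* build: cc -O2 -pthread sketch.c */
}

Note also the ordering the kernel code adopts above: wake_up_locked() is issued while sem->wait.lock is still held and the irqrestore unlock follows, so waking a sleeper costs no extra lock acquisition compared to dropping the lock first and then calling wake_up().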