Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
nexedi
linux
Commits
0ed4dd24
Commit
0ed4dd24
authored
Jun 11, 2002
by
Ingo Molnar
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
- rq-lock optimization in the preemption case, from Robert Love, plus some more cleanups.
parent
2b75b535
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
48 additions
and
27 deletions
+48
-27
include/linux/spinlock.h
include/linux/spinlock.h
+20
-13
kernel/sched.c
kernel/sched.c
+28
-14
No files found.
include/linux/spinlock.h
View file @
0ed4dd24
...
...
@@ -157,6 +157,12 @@ do { \
preempt_enable(); \
} while (0)
/*
 * spin_unlock_no_resched - drop the raw spinlock, then re-enable
 * preemption via preempt_enable_no_resched(), i.e. without the
 * reschedule check that plain preempt_enable() (and thus plain
 * spin_unlock()) performs. Multi-statement macro wrapped in
 * do { } while (0) so it behaves as a single statement.
 */
#define spin_unlock_no_resched(lock) \
do { \
	_raw_spin_unlock(lock); \
	preempt_enable_no_resched(); \
} while (0)
#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
...
...
@@ -166,20 +172,21 @@ do { \
#else
#define preempt_get_count() (0)
#define preempt_disable() do { } while (0)
#define preempt_get_count()
(0)
#define preempt_disable()
do { } while (0)
#define preempt_enable_no_resched() do {} while(0)
#define preempt_enable() do { } while (0)
#define spin_lock(lock) _raw_spin_lock(lock)
#define spin_trylock(lock) _raw_spin_trylock(lock)
#define spin_unlock(lock) _raw_spin_unlock(lock)
#define read_lock(lock) _raw_read_lock(lock)
#define read_unlock(lock) _raw_read_unlock(lock)
#define write_lock(lock) _raw_write_lock(lock)
#define write_unlock(lock) _raw_write_unlock(lock)
#define write_trylock(lock) _raw_write_trylock(lock)
#define preempt_enable() do { } while (0)
#define spin_lock(lock) _raw_spin_lock(lock)
#define spin_trylock(lock) _raw_spin_trylock(lock)
#define spin_unlock(lock) _raw_spin_unlock(lock)
#define spin_unlock_no_resched(lock) _raw_spin_unlock(lock)
#define read_lock(lock) _raw_read_lock(lock)
#define read_unlock(lock) _raw_read_unlock(lock)
#define write_lock(lock) _raw_write_lock(lock)
#define write_unlock(lock) _raw_write_unlock(lock)
#define write_trylock(lock) _raw_write_trylock(lock)
#endif
/* "lock on reference count zero" */
...
...
kernel/sched.c
View file @
0ed4dd24
...
...
@@ -152,17 +152,21 @@ static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define rt_task(p) ((p)->prio < MAX_RT_PRIO)
/*
* task_rq_lock - lock the runqueue a given task resides on and disable
* interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
static
inline
runqueue_t
*
task_rq_lock
(
task_t
*
p
,
unsigned
long
*
flags
)
{
struct
runqueue
*
rq
;
repeat_lock_task:
preempt_disable
(
);
local_irq_save
(
*
flags
);
rq
=
task_rq
(
p
);
spin_lock
_irqsave
(
&
rq
->
lock
,
*
flags
);
spin_lock
(
&
rq
->
lock
);
if
(
unlikely
(
rq
!=
task_rq
(
p
)))
{
spin_unlock_irqrestore
(
&
rq
->
lock
,
*
flags
);
preempt_enable
();
goto
repeat_lock_task
;
}
return
rq
;
...
...
@@ -171,7 +175,23 @@ static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
/*
 * task_rq_unlock - release the runqueue lock taken by task_rq_lock()
 * and restore the saved interrupt flags, then re-enable preemption.
 *
 * @rq:    runqueue previously returned by task_rq_lock()
 * @flags: interrupt-flags storage that task_rq_lock() filled in
 *
 * NOTE(review): the lock is dropped (with irq restore) before
 * preemption is re-enabled — keep this ordering.
 */
static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
{
	spin_unlock_irqrestore(&rq->lock, *flags);
	preempt_enable();
}
/*
 * rq_lock - lock a given runqueue and disable interrupts.
 *
 * Disables local interrupts first, then re-reads the local runqueue
 * via this_rq() and takes its spinlock; the refreshed runqueue
 * pointer is returned.
 *
 * NOTE(review): the incoming @rq value is never read — it is
 * overwritten by this_rq() after irqs are off. Presumably disabling
 * interrupts pins execution to the current CPU so the lookup is
 * stable; confirm against callers (they pass this_rq() anyway).
 */
static inline runqueue_t *rq_lock(runqueue_t *rq)
{
	local_irq_disable();
	rq = this_rq();
	spin_lock(&rq->lock);
	return rq;
}
/*
 * rq_unlock - release a runqueue lock taken by rq_lock() and
 * re-enable local interrupts.
 *
 * NOTE(review): unlock happens before local_irq_enable() — the
 * inverse of rq_lock()'s acquire order; keep this ordering.
 */
static inline void rq_unlock(runqueue_t *rq)
{
	spin_unlock(&rq->lock);
	local_irq_enable();
}
/*
...
...
@@ -364,9 +384,7 @@ void wake_up_forked_process(task_t * p)
{
runqueue_t
*
rq
;
preempt_disable
();
rq
=
this_rq
();
spin_lock_irq
(
&
rq
->
lock
);
rq
=
rq_lock
(
rq
);
p
->
state
=
TASK_RUNNING
;
if
(
!
rt_task
(
p
))
{
...
...
@@ -382,8 +400,7 @@ void wake_up_forked_process(task_t * p)
p
->
thread_info
->
cpu
=
smp_processor_id
();
activate_task
(
p
,
rq
);
spin_unlock_irq
(
&
rq
->
lock
);
preempt_enable
();
rq_unlock
(
rq
);
}
/*
...
...
@@ -1367,8 +1384,7 @@ asmlinkage long sys_sched_yield(void)
runqueue_t
*
rq
;
prio_array_t
*
array
;
preempt_disable
();
rq
=
this_rq
();
rq
=
rq_lock
(
rq
);
/*
* Decrease the yielding task's priority by one, to avoid
...
...
@@ -1378,7 +1394,6 @@ asmlinkage long sys_sched_yield(void)
* If priority is already MAX_PRIO-1 then we still
* roundrobin the task within the runlist.
*/
spin_lock_irq
(
&
rq
->
lock
);
array
=
current
->
array
;
/*
* If the task has reached maximum priority (or is a RT task)
...
...
@@ -1395,8 +1410,7 @@ asmlinkage long sys_sched_yield(void)
list_add_tail
(
&
current
->
run_list
,
array
->
queue
+
current
->
prio
);
__set_bit
(
current
->
prio
,
array
->
bitmap
);
}
spin_unlock
(
&
rq
->
lock
);
preempt_enable_no_resched
();
spin_unlock_no_resched
(
&
rq
->
lock
);
schedule
();
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment