nexedi / linux · Commits

Commit 56d8b39d, authored Sep 25, 2002 by Patrick Mochel

    Merge bk://ldm@bkbits.net/linux-2.5
    into osdl.org:/home/mochel/src/kernel/devel/linux-2.5

Parents: abe2e064 4a99b33d

Showing 10 changed files with 46 additions and 35 deletions (+46 -35)
drivers/input/mousedev.c      +11  -8
fs/jfs/jfs_dmap.c             +10  -10
fs/locks.c                     +1  -1
include/linux/brlock.h         +4  -1
include/linux/netdevice.h      +8  -4
include/linux/page-flags.h     +3  -3
kernel/exit.c                  +1  -1
mm/highmem.c                   +2  -0
mm/mprotect.c                  +5  -0
mm/slab.c                      +1  -7
drivers/input/mousedev.c  (view file @ 56d8b39d)

@@ -380,18 +380,21 @@ static ssize_t mousedev_read(struct file * file, char * buffer, size_t count, lo
 	if (!list->ready && !list->buffer) {
 		add_wait_queue(&list->mousedev->wait, &wait);
+		for (;;) {
 			set_current_state(TASK_INTERRUPTIBLE);
-		while (!list->ready) {
+			retval = 0;
+			if (list->ready || list->buffer)
+				break;
-			if (file->f_flags & O_NONBLOCK) {
 				retval = -EAGAIN;
+			if (file->f_flags & O_NONBLOCK)
 				break;
-			}
-			if (signal_pending(current)) {
 				retval = -ERESTARTSYS;
+			if (signal_pending(current))
 				break;
-			}
 			schedule();
 		}
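The change above converts mousedev_read() to the canonical interruptible wait loop: the task state is re-armed with set_current_state(TASK_INTERRUPTIBLE) on every pass, before the condition is re-checked, so a wakeup arriving between the check and schedule() simply leaves the task runnable instead of being lost, and the O_NONBLOCK and signal cases fall out of the same loop. A minimal sketch of that pattern, with hypothetical placeholders (my_queue, data_ready, nonblocking) rather than the driver's own names:

	DECLARE_WAITQUEUE(wait, current);
	int retval = 0;

	add_wait_queue(&my_queue, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* re-arm before each check */
		if (data_ready)				/* check only after arming... */
			break;				/* ...so a wakeup in between is not missed */
		retval = -EAGAIN;
		if (nonblocking)
			break;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();				/* sleep until woken */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_queue, &wait);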
fs/jfs/jfs_dmap.c  (view file @ 56d8b39d)

@@ -648,7 +648,7 @@ int dbNextAG(struct inode *ipbmap)
 	agpref = bmp->db_agpref;
 	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
 	    (bmp->db_agfree[agpref] >= avgfree))
-		goto found;
+		goto unlock;
 
 	/* From the last preferred ag, find the next one with at least
 	 * average free space.
@@ -660,9 +660,12 @@ int dbNextAG(struct inode *ipbmap)
 		if (atomic_read(&bmp->db_active[agpref]))
 			/* open file is currently growing in this ag */
 			continue;
 
-		if (bmp->db_agfree[agpref] >= avgfree)
-			goto found;
-		else if (bmp->db_agfree[agpref] > hwm) {
+		if (bmp->db_agfree[agpref] >= avgfree) {
+			/* Return this one */
+			bmp->db_agpref = agpref;
+			goto unlock;
+		} else if (bmp->db_agfree[agpref] > hwm) {
 			/* Less than avg. freespace, but best so far */
 			hwm = bmp->db_agfree[agpref];
 			next_best = agpref;
 		}
@@ -673,12 +676,9 @@ int dbNextAG(struct inode *ipbmap)
 	 * next best
 	 */
 	if (next_best != -1)
-		agpref = next_best;
-	/* else agpref should be back to its original value */
-
-found:
-	bmp->db_agpref = agpref;
+		bmp->db_agpref = next_best;
+	/* else leave db_agpref unchanged */
 
 unlock:
 	BMAP_UNLOCK(bmp);
 
 	/* return the preferred group.
fs/locks.c  (view file @ 56d8b39d)

@@ -253,7 +253,7 @@ static int flock_make_lock(struct file *filp,
 	fl->fl_file = filp;
 	fl->fl_pid = current->pid;
-	fl->fl_flags = FL_FLOCK;
+	fl->fl_flags = (cmd & LOCK_NB) ? FL_FLOCK : FL_FLOCK | FL_SLEEP;
 	fl->fl_type = type;
 	fl->fl_end = OFFSET_MAX;
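The one-line fix above makes flock_make_lock() honour LOCK_NB: a non-blocking request gets plain FL_FLOCK, while a blocking one also carries FL_SLEEP so the lock code is allowed to wait. Seen from userspace, that is the difference between flock() failing with EWOULDBLOCK and flock() sleeping; a hedged illustration (the lock-file path is made up):

	#include <stdio.h>
	#include <errno.h>
	#include <fcntl.h>
	#include <sys/file.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/tmp/example.lock", O_CREAT | O_RDWR, 0644);

		if (fd < 0)
			return 1;
		/* LOCK_NB -> FL_FLOCK only: fail immediately if the lock is held */
		if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
			printf("busy, not waiting\n");
		else
			printf("lock taken (without LOCK_NB this call could sleep)\n");
		close(fd);
		return 0;
	}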
include/linux/brlock.h  (view file @ 56d8b39d)

@@ -85,7 +85,8 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
 
-	read_lock(&__brlock_array[smp_processor_id()][idx]);
+	preempt_disable();
+	_raw_read_lock(&__brlock_array[smp_processor_id()][idx]);
 }
 
 static inline void br_read_unlock (enum brlock_indices idx)
@@ -109,6 +110,7 @@ static inline void br_read_lock (enum brlock_indices idx)
 	if (idx >= __BR_END)
 		__br_lock_usage_bug();
 
+	preempt_disable();
 	ctr = &__brlock_array[smp_processor_id()][idx];
 	lock = &__br_write_locks[idx].lock;
 again:
@@ -147,6 +149,7 @@ static inline void br_read_unlock (enum brlock_indices idx)
 	wmb();
 	(*ctr)--;
+	preempt_enable();
 }
 
 #endif /* __BRLOCK_USE_ATOMICS */
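The br_read_lock() change is about evaluation order: __brlock_array is indexed by smp_processor_id(), and with kernel preemption enabled a task can be moved to another CPU between computing that index and the moment read_lock() itself disables preemption, so it would lock one CPU's entry and later unlock another's. Calling preempt_disable() first pins the CPU id, and the _raw_ variant is used because preemption is now handled explicitly. Schematically (the array name is from the header, the rest is a sketch, not a drop-in replacement):

	/* racy under CONFIG_PREEMPT: the smp_processor_id() argument is
	 * evaluated before read_lock() gets a chance to disable preemption */
	read_lock(&__brlock_array[smp_processor_id()][idx]);

	/* safe: pin the CPU first, then index the per-CPU array */
	preempt_disable();
	_raw_read_lock(&__brlock_array[smp_processor_id()][idx]);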
include/linux/netdevice.h  (view file @ 56d8b39d)

@@ -514,9 +514,10 @@ static inline void __netif_schedule(struct net_device *dev)
 {
 	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;
 
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		dev->next_sched = softnet_data[cpu].output_queue;
 		softnet_data[cpu].output_queue = dev;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -563,10 +564,11 @@ static inline int netif_running(struct net_device *dev)
 static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 {
 	if (atomic_dec_and_test(&skb->users)) {
-		int cpu = smp_processor_id();
+		int cpu;
 		unsigned long flags;
 
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		skb->next = softnet_data[cpu].completion_queue;
 		softnet_data[cpu].completion_queue = skb;
 		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
@@ -726,9 +728,10 @@ static inline int netif_rx_schedule_prep(struct net_device *dev)
 static inline void __netif_rx_schedule(struct net_device *dev)
 {
 	unsigned long flags;
-	int cpu = smp_processor_id();
+	int cpu;
 
 	local_irq_save(flags);
+	cpu = smp_processor_id();
 	dev_hold(dev);
 	list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 	if (dev->quota < 0)
@@ -754,11 +757,12 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 {
 	if (netif_rx_schedule_prep(dev)) {
 		unsigned long flags;
-		int cpu = smp_processor_id();
+		int cpu;
 
 		dev->quota += undo;
 		local_irq_save(flags);
+		cpu = smp_processor_id();
 		list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 		__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
 		local_irq_restore(flags);
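All four netdevice.h hunks apply one transformation: the per-CPU index is no longer captured at declaration time with int cpu = smp_processor_id(), but only after local_irq_save(), which keeps the task on its current CPU for the rest of the critical section. It is the same migration window as in brlock.h above, reduced here to its shape (softnet_data as in the header, the surrounding code elided):

	/* before: cpu can be stale by the time the queue is touched */
	int cpu = smp_processor_id();
	local_irq_save(flags);
	softnet_data[cpu].output_queue = dev;

	/* after: the id is sampled with local interrupts already off,
	 * so the task cannot be preempted and migrated under us */
	int cpu;
	local_irq_save(flags);
	cpu = smp_processor_id();
	softnet_data[cpu].output_queue = dev;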
include/linux/page-flags.h  (view file @ 56d8b39d)

@@ -86,9 +86,9 @@ extern void get_page_state(struct page_state *ret);
 #define mod_page_state(member, delta) \
 	do { \
-		preempt_disable(); \
-		page_states[smp_processor_id()].member += (delta); \
-		preempt_enable(); \
+		int cpu = get_cpu(); \
+		page_states[cpu].member += (delta); \
+		put_cpu(); \
 	} while (0)
 
 #define inc_page_state(member) mod_page_state(member, 1UL)
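mod_page_state() moves from an explicit preempt_disable()/preempt_enable() pair to get_cpu()/put_cpu(), which fold "disable preemption and hand back the current CPU id" into one step, so the id cannot be read on one CPU and used on another. The general shape of a per-CPU counter update with that API, as a sketch (my_counters and my_count_event are hypothetical names, not kernel symbols):

	static unsigned long my_counters[NR_CPUS];

	static inline void my_count_event(void)
	{
		int cpu = get_cpu();	/* disables preemption, returns the CPU id */
		my_counters[cpu]++;	/* safe: we cannot migrate while counting */
		put_cpu();		/* re-enables preemption */
	}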
kernel/exit.c  (view file @ 56d8b39d)

@@ -626,7 +626,7 @@ NORET_TYPE void do_exit(long code)
 	tsk->flags |= PF_EXITING;
 	del_timer_sync(&tsk->real_timer);
 
-	if (unlikely(preempt_count()))
+	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, current->pid,
 				preempt_count());
mm/highmem.c  (view file @ 56d8b39d)

@@ -472,6 +472,7 @@ void check_highmem_ptes(void)
 {
 	int idx, type;
 
+	preempt_disable();
 	for (type = 0; type < KM_TYPE_NR; type++) {
 		idx = type + KM_TYPE_NR * smp_processor_id();
 		if (!pte_none(*(kmap_pte - idx))) {
@@ -479,6 +480,7 @@ void check_highmem_ptes(void)
 			BUG();
 		}
 	}
+	preempt_enable();
 }
 
 #endif
mm/mprotect.c  (view file @ 56d8b39d)

@@ -193,6 +193,11 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		if (error)
 			goto fail;
 	}
+	/*
+	 * Unless it returns an error, this function always sets *pprev to
+	 * the first vma for which vma->vm_end >= end.
+	 */
+	*pprev = vma;
 	if (end != vma->vm_end) {
 		error = split_vma(mm, vma, end, 0);
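The added comment and assignment give mprotect_fixup() a firm contract: on success *pprev always ends up at the first vma whose vm_end >= end, whether or not anything was split or merged. A hypothetical caller shape that leans on this, walking a range vma by vma (start, end and newflags stand in for the caller's arguments; this is not the actual sys_mprotect code):

	struct vm_area_struct *vma, *prev;
	unsigned long nstart, tmp;
	int error = 0;

	for (nstart = start; vma && nstart < end; vma = prev->vm_next) {
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;	/* prev covers the range just fixed up */
	}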
mm/slab.c  (view file @ 56d8b39d)

@@ -1357,11 +1357,7 @@ void* kmem_cache_alloc_batch(kmem_cache_t* cachep, int flags)
 		cc_entry(cc)[cc->avail++] =
 				kmem_cache_alloc_one_tail(cachep, slabp);
 	}
-	/*
-	 * CAREFUL: do not enable preemption yet, the per-CPU
-	 * entries rely on us being atomic.
-	 */
-	_raw_spin_unlock(&cachep->spinlock);
+	spin_unlock(&cachep->spinlock);
 
 	if (cc->avail)
 		return cc_entry(cc)[--cc->avail];
@@ -1389,8 +1385,6 @@ static inline void * __kmem_cache_alloc (kmem_cache_t *cachep, int flags)
 		STATS_INC_ALLOCMISS(cachep);
 		objp = kmem_cache_alloc_batch(cachep, flags);
 		local_irq_restore(save_flags);
-		/* end of non-preemptible region */
-		preempt_enable();
 		if (!objp)
 			goto alloc_new_slab_nolock;
 		return objp;
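The slab hunks lean on what the plain lock pair already does on a preemptible kernel: spin_lock()/spin_unlock() bracket the critical section with preempt_disable()/preempt_enable(), which is why a separate preempt_enable() sitting next to a _raw_spin_unlock() becomes redundant once the plain unlock is used. In rough outline, under CONFIG_PREEMPT the pair expands to something like this (simplified sketch with my_ names, not the real lock implementation):

	#define my_spin_lock(l)			\
		do {				\
			preempt_disable();	\
			_raw_spin_lock(l);	\
		} while (0)

	#define my_spin_unlock(l)		\
		do {				\
			_raw_spin_unlock(l);	\
			preempt_enable();	\
		} while (0)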