Commit 950a2b0b
authored Feb 04, 2002 by Linus Torvalds

v2.4.0.4 -> v2.4.0.5
- ppp UP deadlock attack fix
parent 6aea1666
Showing 41 changed files with 360 additions and 412 deletions (+360 -412).
Makefile                          +2   -3
arch/i386/defconfig               +2   -0
drivers/block/DAC960.c            +0   -3
drivers/block/elevator.c          +60  -70
drivers/block/ll_rw_blk.c         +92  -77
drivers/block/paride/pd.c         +0   -2
drivers/block/paride/pf.c         +0   -2
drivers/i2o/i2o_block.c           +0   -2
drivers/ide/ide-dma.c             +7   -1
drivers/ide/ide-probe.c           +4   -3
drivers/isdn/isdn_v110.c          +2   -2
drivers/s390/block/dasd.c         +0   -1
drivers/scsi/constants.c          +1   -1
drivers/scsi/scsi_lib.c           +67  -115
drivers/scsi/scsi_merge.c         +5   -7
drivers/scsi/sg.c                 +1   -0
drivers/scsi/sr.c                 +6   -4
fs/reiserfs/bitmap.c              +1   -0
fs/reiserfs/buffer2.c             +1   -0
fs/reiserfs/dir.c                 +5   -0
fs/reiserfs/do_balan.c            +1   -0
fs/reiserfs/file.c                +5   -2
fs/reiserfs/fix_node.c            +1   -0
fs/reiserfs/ibalance.c            +1   -0
fs/reiserfs/inode.c               +3   -2
fs/reiserfs/journal.c             +12  -5
fs/reiserfs/lbalance.c            +1   -0
fs/reiserfs/namei.c               +1   -0
fs/reiserfs/objectid.c            +1   -0
fs/reiserfs/prints.c              +2   -2
fs/reiserfs/stree.c               +1   -0
fs/reiserfs/super.c               +1   -0
fs/reiserfs/tail_conversion.c     +1   -0
include/linux/blk.h               +0   -4
include/linux/blkdev.h            +8   -8
include/linux/elevator.h          +34  -44
include/linux/reiserfs_fs.h       +2   -4
include/linux/sched.h             +12  -8
kernel/sched.c                    +12  -36
mm/filemap.c                      +0   -4
scripts/checkconfig.pl            +5   -0
==== Makefile ====

 VERSION = 2
 PATCHLEVEL = 4
 SUBLEVEL = 1
-EXTRAVERSION = -pre4
+EXTRAVERSION = -pre5

 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)

@@ -457,9 +457,8 @@ export MODVERFILE

 depend dep: dep-files

-# make checkconfig: Prune 'scripts' directory to avoid "false positives".
 checkconfig:
-	find * -name '*.[hcS]' -type f -print | grep -v scripts/ | sort | xargs $(PERL) -w scripts/checkconfig.pl
+	find * -name '*.[hcS]' -type f -print | sort | xargs $(PERL) -w scripts/checkconfig.pl

 checkhelp:
 	find * -name [cC]onfig.in -print | sort | xargs $(PERL) -w scripts/checkhelp.pl
==== arch/i386/defconfig ====

@@ -537,6 +537,8 @@ CONFIG_PCMCIA_SERIAL=y
 # CONFIG_QUOTA is not set
 # CONFIG_AUTOFS_FS is not set
 CONFIG_AUTOFS4_FS=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_REISERFS_CHECK is not set
 # CONFIG_ADFS_FS is not set
 # CONFIG_ADFS_FS_RW is not set
 # CONFIG_AFFS_FS is not set
==== drivers/block/DAC960.c ====

@@ -1820,7 +1820,6 @@ static int DAC960_BackMergeFunction(RequestQueue_T *RequestQueue,
       Request->nr_segments < Controller->DriverScatterGatherLimit)
     {
       Request->nr_segments++;
-      RequestQueue->elevator.nr_segments++;
       return true;
     }
   return false;

@@ -1844,7 +1843,6 @@ static int DAC960_FrontMergeFunction(RequestQueue_T *RequestQueue,
       Request->nr_segments < Controller->DriverScatterGatherLimit)
     {
       Request->nr_segments++;
-      RequestQueue->elevator.nr_segments++;
       return true;
     }
   return false;

@@ -1874,7 +1872,6 @@ static int DAC960_MergeRequestsFunction(RequestQueue_T *RequestQueue,
   if (TotalSegments > MaxSegments ||
       TotalSegments > Controller->DriverScatterGatherLimit)
     return false;
-  RequestQueue->elevator.nr_segments -= SameSegment;
   Request->nr_segments = TotalSegments;
   return true;
 }
==== drivers/block/elevator.c ====

@@ -24,125 +24,115 @@
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/blk.h>
 #include <linux/module.h>
 #include <asm/uaccess.h>

-/*
- * Order ascending, but only allow a request to be skipped a certain
- * number of times
- */
-void elevator_linus(struct request *req, elevator_t *elevator,
-		    struct list_head *real_head,
-		    struct list_head *head, int orig_latency)
-{
-	struct list_head *entry = real_head;
-	struct request *tmp;
-
-	req->elevator_sequence = orig_latency;
-
-	while ((entry = entry->prev) != head) {
-		tmp = blkdev_entry_to_request(entry);
-		if (IN_ORDER(tmp, req))
-			break;
-		if (!tmp->elevator_sequence)
-			break;
-		tmp->elevator_sequence--;
-	}
-	list_add(&req->queue, entry);
-}
-
 int elevator_linus_merge(request_queue_t *q, struct request **req,
+			 struct list_head *head,
 			 struct buffer_head *bh, int rw,
-			 int *max_sectors, int *max_segments)
+			 int max_sectors, int max_segments)
 {
-	struct list_head *entry, *head = &q->queue_head;
+	struct list_head *entry = &q->queue_head;
 	unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;

-	entry = head;
-	if (q->head_active && !q->plugged)
-		head = head->next;
-
 	while ((entry = entry->prev) != head) {
-		struct request *__rq = *req = blkdev_entry_to_request(entry);
+		struct request *__rq = blkdev_entry_to_request(entry);
+
+		/*
+		 * simply "aging" of requests in queue
+		 */
+		if (__rq->elevator_sequence-- <= 0) {
+			*req = __rq;
+			break;
+		}
+
 		if (__rq->sem)
 			continue;
 		if (__rq->cmd != rw)
 			continue;
-		if (__rq->nr_sectors + count > *max_sectors)
-			continue;
 		if (__rq->rq_dev != bh->b_rdev)
 			continue;
+		if (__rq->nr_sectors + count > max_sectors)
+			continue;
+		if (__rq->elevator_sequence < count)
+			break;
 		if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
 			ret = ELEVATOR_BACK_MERGE;
+			*req = __rq;
 			break;
-		}
-		if (!__rq->elevator_sequence)
-			break;
-		if (__rq->sector - count == bh->b_rsector) {
-			__rq->elevator_sequence--;
+		} else if (__rq->sector - count == bh->b_rsector) {
 			ret = ELEVATOR_FRONT_MERGE;
+			__rq->elevator_sequence -= count;
+			*req = __rq;
 			break;
-		}
+		} else if (!*req && BHRQ_IN_ORDER(bh, __rq))
+			*req = __rq;
 	}

+	return ret;
+}
+
+void elevator_linus_merge_cleanup(request_queue_t *q, struct request *req, int count)
+{
+	struct list_head *entry = &req->queue, *head = &q->queue_head;
+
 	/*
 	 * second pass scan of requests that got passed over, if any
 	 */
-	if (ret != ELEVATOR_NO_MERGE && *req) {
-		while ((entry = entry->next) != &q->queue_head) {
-			struct request *tmp = blkdev_entry_to_request(entry);
-			tmp->elevator_sequence--;
-		}
+	while ((entry = entry->next) != head) {
+		struct request *tmp = blkdev_entry_to_request(entry);
+		tmp->elevator_sequence -= count;
 	}
-
-	return ret;
 }

-/*
- * No request sorting, just add it to the back of the list
- */
-void elevator_noop(struct request *req, elevator_t *elevator,
-		   struct list_head *real_head, struct list_head *head,
-		   int orig_latency)
+void elevator_linus_merge_req(struct request *req, struct request *next)
 {
-	list_add_tail(&req->queue, real_head);
+	if (next->elevator_sequence < req->elevator_sequence)
+		req->elevator_sequence = next->elevator_sequence;
 }

 /*
- * See if we can find a request that is buffer can be coalesced with.
+ * See if we can find a request that this buffer can be coalesced with.
 */
 int elevator_noop_merge(request_queue_t *q, struct request **req,
+			struct list_head *head,
 			struct buffer_head *bh, int rw,
-			int *max_sectors, int *max_segments)
+			int max_sectors, int max_segments)
 {
-	struct list_head *entry, *head = &q->queue_head;
+	struct list_head *entry;
 	unsigned int count = bh->b_size >> 9;

-	if (q->head_active && !q->plugged)
-		head = head->next;
-
-	entry = head;
+	if (list_empty(&q->queue_head))
+		return ELEVATOR_NO_MERGE;
+
+	entry = &q->queue_head;
 	while ((entry = entry->prev) != head) {
-		struct request *__rq = *req = blkdev_entry_to_request(entry);
-		if (__rq->sem)
-			continue;
+		struct request *__rq = blkdev_entry_to_request(entry);
 		if (__rq->cmd != rw)
 			continue;
-		if (__rq->nr_sectors + count > *max_sectors)
-			continue;
 		if (__rq->rq_dev != bh->b_rdev)
 			continue;
-		if (__rq->sector + __rq->nr_sectors == bh->b_rsector)
+		if (__rq->nr_sectors + count > max_sectors)
+			continue;
+		if (__rq->sem)
+			continue;
+		if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
+			*req = __rq;
 			return ELEVATOR_BACK_MERGE;
-		if (__rq->sector - count == bh->b_rsector)
+		} else if (__rq->sector - count == bh->b_rsector) {
+			*req = __rq;
 			return ELEVATOR_FRONT_MERGE;
+		}
 	}
+
+	*req = blkdev_entry_to_request(q->queue_head.prev);
 	return ELEVATOR_NO_MERGE;
 }

+/*
+ * The noop "elevator" does not do any accounting
+ */
 void elevator_noop_dequeue(struct request *req) {}

+void elevator_noop_merge_cleanup(request_queue_t *q, struct request *req, int count) {}
+
+void elevator_noop_merge_req(struct request *req, struct request *next) {}
+
 int blkelvget_ioctl(elevator_t *elevator, blkelv_ioctl_arg_t *arg)
 {
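
Note on the hunk above: the rewrite replaces the old insertion-time skip counter with an elevator_sequence budget that ages on every scan of the queue ("__rq->elevator_sequence-- <= 0") and is charged by the merged sector count, so a repeatedly passed-over request eventually becomes a barrier that newer I/O cannot jump. A toy user-space model of that aging rule follows; all names and types here are invented for illustration and are not kernel code:

#include <stdio.h>

struct toy_req { long sector, nr_sectors, budget; };

/* Walk the queue back to front, aging each request we pass; a request
 * whose budget is exhausted acts as a barrier, so the new I/O must be
 * placed behind it even if it would sort earlier. */
static int find_insert_point(struct toy_req *q, int n, long new_sector)
{
	int i;

	for (i = n - 1; i >= 0; i--) {
		if (q[i].budget-- <= 0)
			return i + 1;	/* starved request: stop here */
		if (q[i].sector + q[i].nr_sectors <= new_sector)
			return i + 1;	/* sorted position found */
	}
	return 0;
}

int main(void)
{
	struct toy_req q[3] = { {0, 8, 2}, {100, 8, 2}, {200, 8, 2} };

	/* sector 50 sorts between q[0] and q[1] while budgets last */
	printf("insert at index %d\n", find_insert_point(q, 3, 50));
	return 0;
}

Called repeatedly against the same queue, the budgets drain; once one reaches zero the scan stops at that request and new I/O queues behind it instead of being sorted past it.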
==== drivers/block/ll_rw_blk.c ====

@@ -125,7 +125,7 @@ static inline int get_max_sectors(kdev_t dev)
 	return max_sectors[MAJOR(dev)][MINOR(dev)];
 }

-static inline request_queue_t *__blk_get_queue(kdev_t dev)
+inline request_queue_t *__blk_get_queue(kdev_t dev)
 {
 	struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);

@@ -153,17 +153,14 @@ request_queue_t *blk_get_queue(kdev_t dev)
 static int __blk_cleanup_queue(struct list_head *head)
 {
-	struct list_head *entry;
 	struct request *rq;
 	int i = 0;

 	if (list_empty(head))
 		return 0;

-	entry = head->next;
 	do {
-		rq = list_entry(entry, struct request, table);
-		entry = entry->next;
+		rq = list_entry(head->next, struct request, table);
 		list_del(&rq->table);
 		kmem_cache_free(request_cachep, rq);
 		i++;

@@ -192,6 +189,8 @@ void blk_cleanup_queue(request_queue_t * q)
 	count -= __blk_cleanup_queue(&q->request_freelist[READ]);
 	count -= __blk_cleanup_queue(&q->request_freelist[WRITE]);
+	count -= __blk_cleanup_queue(&q->pending_freelist[READ]);
+	count -= __blk_cleanup_queue(&q->pending_freelist[WRITE]);

 	if (count)
 		printk("blk_cleanup_queue: leaked requests (%d)\n", count);

@@ -290,7 +289,6 @@ static inline int ll_new_segment(request_queue_t *q, struct request *req, int ma
 {
 	if (req->nr_segments < max_segments) {
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	}
 	return 0;

@@ -327,7 +325,6 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
 	if (total_segments > max_segments)
 		return 0;

-	q->elevator.nr_segments -= same_segment;
 	req->nr_segments = total_segments;
 	return 1;
 }

@@ -364,7 +361,7 @@ static inline void __generic_unplug_device(request_queue_t *q)
 	}
 }

-static void generic_unplug_device(void *data)
+void generic_unplug_device(void *data)
 {
 	request_queue_t *q = (request_queue_t *) data;
 	unsigned long flags;

@@ -379,19 +376,24 @@ static void blk_init_free_list(request_queue_t *q)
 	struct request *rq;
 	int i;

+	INIT_LIST_HEAD(&q->request_freelist[READ]);
+	INIT_LIST_HEAD(&q->request_freelist[WRITE]);
+	INIT_LIST_HEAD(&q->pending_freelist[READ]);
+	INIT_LIST_HEAD(&q->pending_freelist[WRITE]);
+	q->pending_free[READ] = q->pending_free[WRITE] = 0;
+
 	/*
-	 * Divide requests in half between read and write. This used to
-	 * be a 2/3 advantage for reads, but now reads can steal from
-	 * the write free list.
+	 * Divide requests in half between read and write
 	 */
 	for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
 		rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
 		memset(rq, 0, sizeof(struct request));
 		rq->rq_status = RQ_INACTIVE;
 		list_add(&rq->table, &q->request_freelist[i & 1]);
 	}

 	init_waitqueue_head(&q->wait_for_request);
-	spin_lock_init(&q->request_lock);
+	spin_lock_init(&q->queue_lock);
 }

 static int __make_request(request_queue_t *q, int rw, struct buffer_head *bh);

@@ -426,14 +428,12 @@ static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh);
 *    blk_queue_headactive().
 *
 * Note:
- *    blk_init_queue() must be paired with a blk_cleanup-queue() call
+ *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
 void blk_init_queue(request_queue_t *q, request_fn_proc *rfn)
 {
 	INIT_LIST_HEAD(&q->queue_head);
-	INIT_LIST_HEAD(&q->request_freelist[READ]);
-	INIT_LIST_HEAD(&q->request_freelist[WRITE]);
 	elevator_init(&q->elevator, ELEVATOR_LINUS);
 	blk_init_free_list(q);
 	q->request_fn = rfn;

@@ -455,7 +455,6 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
 	q->head_active = 1;
 }

 #define blkdev_free_rq(list) list_entry((list)->next, struct request, table);
-
 /*
 * Get a free request. io_request_lock must be held and interrupts

@@ -463,37 +462,16 @@ void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
 */
 static inline struct request *get_request(request_queue_t *q, int rw)
 {
-	struct list_head *list = &q->request_freelist[rw];
-	struct request *rq;
+	struct request *rq = NULL;

-	/*
-	 * Reads get preferential treatment and are allowed to steal
-	 * from the write free list if necessary.
-	 */
-	if (!list_empty(list)) {
-		rq = blkdev_free_rq(list);
-		goto got_rq;
-	}
-
-	/*
-	 * if the WRITE list is non-empty, we know that rw is READ
-	 * and that the READ list is empty. allow reads to 'steal'
-	 * from the WRITE list.
-	 */
-	if (!list_empty(&q->request_freelist[WRITE])) {
-		list = &q->request_freelist[WRITE];
-		rq = blkdev_free_rq(list);
-		goto got_rq;
+	if (!list_empty(&q->request_freelist[rw])) {
+		rq = blkdev_free_rq(&q->request_freelist[rw]);
+		list_del(&rq->table);
+		rq->rq_status = RQ_ACTIVE;
+		rq->special = NULL;
+		rq->q = q;
 	}

-	return NULL;
-
-got_rq:
-	list_del(&rq->table);
-	rq->free_list = list;
-	rq->rq_status = RQ_ACTIVE;
-	rq->special = NULL;
-	rq->q = q;
 	return rq;
 }

@@ -590,16 +568,22 @@ inline void drive_stat_acct (kdev_t dev, int rw,
 */
 static inline void add_request(request_queue_t *q, struct request *req,
-			       struct list_head *head, int lat)
+			       struct list_head *insert_here)
 {
 	int major;

 	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);

+	if (!q->plugged && q->head_active && insert_here == &q->queue_head) {
+		spin_unlock_irq(&io_request_lock);
+		BUG();
+	}
+
 	/*
-	 * let selected elevator insert the request
+	 * elevator indicated where it wants this request to be
+	 * inserted at elevator_merge time
 	 */
-	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, lat);
+	list_add(&req->queue, insert_here);

 	/*
 	 * FIXME(eric) I don't understand why there is a need for this

@@ -617,20 +601,47 @@ static inline void add_request(request_queue_t * q, struct request * req,
 		(q->request_fn)(q);
 }

+void inline blk_refill_freelist(request_queue_t *q, int rw)
+{
+	if (q->pending_free[rw]) {
+		list_splice(&q->pending_freelist[rw], &q->request_freelist[rw]);
+		INIT_LIST_HEAD(&q->pending_freelist[rw]);
+		q->pending_free[rw] = 0;
+	}
+}
+
 /*
 * Must be called with io_request_lock held and interrupts disabled
 */
 void inline blkdev_release_request(struct request *req)
 {
+	request_queue_t *q = req->q;
+	int rw = req->cmd;
+
 	req->rq_status = RQ_INACTIVE;
+	req->q = NULL;

 	/*
 	 * Request may not have originated from ll_rw_blk
 	 */
-	if (req->free_list) {
-		list_add(&req->table, req->free_list);
-		req->free_list = NULL;
-		wake_up(&req->q->wait_for_request);
+	if (q) {
+		if (!list_empty(&q->request_freelist[rw])) {
+			blk_refill_freelist(q, rw);
+			list_add(&req->table, &q->request_freelist[rw]);
+			return;
+		}
+
+		/*
+		 * free list is empty, add to pending free list and
+		 * batch wakeups
+		 */
+		list_add(&req->table, &q->pending_freelist[rw]);
+		if (++q->pending_free[rw] >= (QUEUE_NR_REQUESTS >> 4)) {
+			int wake_up = q->pending_free[rw];
+			blk_refill_freelist(q, rw);
+			wake_up_nr(&q->wait_for_request, wake_up);
+		}
 	}
 }

@@ -658,9 +669,10 @@ static void attempt_merge(request_queue_t * q,
 	 * will have been updated to the appropriate number,
 	 * and we shouldn't do it here too.
 	 */
-	if (!(q->merge_requests_fn)(q, req, next, max_segments))
+	if (!q->merge_requests_fn(q, req, next, max_segments))
 		return;

+	q->elevator.elevator_merge_req_fn(req, next);
 	req->bhtail->b_reqnext = next->bh;
 	req->bhtail = next->bhtail;
 	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

@@ -699,7 +711,7 @@ static int __make_request(request_queue_t * q, int rw,
 	int max_segments = MAX_SEGMENTS;
 	struct request *req = NULL, *freereq = NULL;
 	int rw_ahead, max_sectors, el_ret;
-	struct list_head *head;
+	struct list_head *head, *insert_here;
 	int latency;
 	elevator_t *elevator = &q->elevator;

@@ -713,6 +725,7 @@ static int __make_request(request_queue_t * q, int rw,
 		rw = READ;	/* drop into READ */
 	case READ:
 	case WRITE:
+		latency = elevator_request_latency(elevator, rw);
 		break;
 	default:
 		BUG();

@@ -741,38 +754,32 @@ static int __make_request(request_queue_t * q, int rw,
 	 */
 	max_sectors = get_max_sectors(bh->b_rdev);
-	latency = elevator_request_latency(elevator, rw);

+again:
 	/*
 	 * Now we acquire the request spinlock, we have to be mega careful
 	 * not to schedule or do something nonatomic
 	 */
-again:
 	spin_lock_irq(&io_request_lock);

-	/*
-	 * skip first entry, for devices with active queue head
-	 */
 	head = &q->queue_head;
-	if (q->head_active && !q->plugged)
-		head = head->next;
-
+	insert_here = head->prev;
 	if (list_empty(head)) {
 		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
 		goto get_rq;
-	}
+	} else if (q->head_active && !q->plugged)
+		head = head->next;

-	el_ret = elevator->elevator_merge_fn(q, &req, bh, rw, &max_sectors, &max_segments);
+	el_ret = elevator->elevator_merge_fn(q, &req, head, bh, rw, max_sectors, max_segments);
 	switch (el_ret) {

 		case ELEVATOR_BACK_MERGE:
 			if (!q->back_merge_fn(q, req, bh, max_segments))
 				break;
+			elevator->elevator_merge_cleanup_fn(q, req, count);
 			req->bhtail->b_reqnext = bh;
 			req->bhtail = bh;
 			req->nr_sectors = req->hard_nr_sectors += count;
-			req->e = elevator;
 			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
 			attempt_back_merge(q, req, max_sectors, max_segments);
 			goto out;

@@ -780,20 +787,28 @@ static int __make_request(request_queue_t * q, int rw,
 		case ELEVATOR_FRONT_MERGE:
 			if (!q->front_merge_fn(q, req, bh, max_segments))
 				break;
+			elevator->elevator_merge_cleanup_fn(q, req, count);
 			bh->b_reqnext = req->bh;
 			req->bh = bh;
 			req->buffer = bh->b_data;
 			req->current_nr_sectors = count;
 			req->sector = req->hard_sector = sector;
 			req->nr_sectors = req->hard_nr_sectors += count;
-			req->e = elevator;
 			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
 			attempt_front_merge(q, head, req, max_sectors, max_segments);
 			goto out;
+
 		/*
 		 * elevator says don't/can't merge. get new request
 		 */
 		case ELEVATOR_NO_MERGE:
+			/*
+			 * use elevator hints as to where to insert the
+			 * request. if no hints, just add it to the back
+			 * of the queue
+			 */
+			if (req)
+				insert_here = &req->queue;
 			break;

 		default:

@@ -821,6 +836,7 @@ static int __make_request(request_queue_t * q, int rw,
 	}

 /* fill up the request-info, and add it to the queue */
+	req->elevator_sequence = latency;
 	req->cmd = rw;
 	req->errors = 0;
 	req->hard_sector = req->sector = sector;

@@ -833,13 +849,12 @@ static int __make_request(request_queue_t * q, int rw,
 	req->bh = bh;
 	req->bhtail = bh;
 	req->rq_dev = bh->b_rdev;
-	req->e = elevator;
-	add_request(q, req, head, latency);
+	add_request(q, req, insert_here);
 out:
-	if (!q->plugged)
-		(q->request_fn)(q);
 	if (freereq)
 		blkdev_release_request(freereq);
+	if (!q->plugged)
+		q->request_fn(q);
 	spin_unlock_irq(&io_request_lock);
 	return 0;
 end_io:

@@ -930,7 +945,6 @@ void generic_make_request (int rw, struct buffer_head * bh)
 			buffer_IO_error(bh);
 			break;
 		}
-
 	}
 	while (q->make_request_fn(q, rw, bh));
 }

@@ -1021,6 +1035,9 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
 	int correct_size;
 	int i;

+	if (!nr)
+		return;
+
 	major = MAJOR(bhs[0]->b_dev);

 	/* Determine correct block size for this device. */

@@ -1035,7 +1052,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh;
 		bh = bhs[i];
-		if (bh->b_size != correct_size) {
+		if (bh->b_size % correct_size) {
 			printk(KERN_NOTICE "ll_rw_block: device %s: "
 			       "only %d-char blocks implemented (%u)\n",
 			       kdevname(bhs[0]->b_dev),

@@ -1138,10 +1155,6 @@ int end_that_request_first (struct request *req, int uptodate, char *name)
 void end_that_request_last(struct request *req)
 {
-	if (req->e) {
-		printk("end_that_request_last called with non-dequeued req\n");
-		BUG();
-	}
 	if (req->sem != NULL)
 		up(req->sem);

@@ -1279,9 +1292,11 @@ EXPORT_SYMBOL(end_that_request_first);
 EXPORT_SYMBOL(end_that_request_last);
 EXPORT_SYMBOL(blk_init_queue);
 EXPORT_SYMBOL(blk_get_queue);
+EXPORT_SYMBOL(__blk_get_queue);
 EXPORT_SYMBOL(blk_cleanup_queue);
 EXPORT_SYMBOL(blk_queue_headactive);
 EXPORT_SYMBOL(blk_queue_pluggable);
 EXPORT_SYMBOL(blk_queue_make_request);
 EXPORT_SYMBOL(generic_make_request);
+EXPORT_SYMBOL(blkdev_release_request);
+EXPORT_SYMBOL(generic_unplug_device);
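
Note on blkdev_release_request() above: together with pending_freelist/pending_free in blkdev.h and wake_up_nr() in sched.h, released requests are now returned in batches. While the free list is empty (i.e. tasks may be sleeping for requests), completions park on a pending list, and the queue only refills and wakes sleepers once QUEUE_NR_REQUESTS >> 4 of them have accumulated, instead of issuing one wake_up() per released request. A stripped-down user-space model of that counting pattern (toy names, no locking, not kernel code):

#include <stdio.h>

#define NR_REQUESTS 512
#define BATCH (NR_REQUESTS >> 4)	/* 32, as in QUEUE_NR_REQUESTS >> 4 */

static int free_count, pending_count;

static void wake_up_nr(int nr) { printf("wake %d waiters\n", nr); }

static void release_request(void)
{
	if (free_count) {		/* free list non-empty: no sleepers */
		free_count++;
		return;
	}
	if (++pending_count >= BATCH) {	/* batch full: refill and wake */
		free_count += pending_count;
		wake_up_nr(pending_count);
		pending_count = 0;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++)
		release_request();	/* prints a single "wake 32 waiters" */
	return 0;
}

Only the first full batch triggers a wakeup here; once the free list is non-empty again, releases go straight back to it, which is the amortization the patch is after.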
==== drivers/block/paride/pd.c ====

@@ -392,7 +392,6 @@ static inline int pd_new_segment(request_queue_t *q, struct request *req, int ma
 	if (req->nr_segments < max_segments) {
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	}
 	return 0;

@@ -432,7 +431,6 @@ static int pd_merge_requests_fn(request_queue_t *q, struct request *req,
 	if (total_segments > max_segments)
 		return 0;

-	q->elevator.nr_segments -= same_segment;
 	req->nr_segments = total_segments;
 	return 1;
 }
==== drivers/block/paride/pf.c ====

@@ -346,7 +346,6 @@ static inline int pf_new_segment(request_queue_t *q, struct request *req, int ma
 	if (req->nr_segments < max_segments) {
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	}
 	return 0;

@@ -386,7 +385,6 @@ static int pf_merge_requests_fn(request_queue_t *q, struct request *req,
 	if (total_segments > max_segments)
 		return 0;

-	q->elevator.nr_segments -= same_segment;
 	req->nr_segments = total_segments;
 	return 1;
 }
==== drivers/i2o/i2o_block.c ====

@@ -392,7 +392,6 @@ static inline int i2ob_new_segment(request_queue_t *q, struct request *req,
 	if (req->nr_segments < max_segments) {
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	}
 	return 0;

@@ -436,7 +435,6 @@ static int i2ob_merge_requests(request_queue_t *q,
 	if (total_segments > max_segments)
 		return 0;

-	q->elevator.nr_segments -= same_segment;
 	req->nr_segments = total_segments;
 	return 1;
 }
==== drivers/ide/ide-dma.c ====

@@ -226,6 +226,9 @@ static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq)
 		unsigned char *virt_addr = bh->b_data;
 		unsigned int size = bh->b_size;

+		if (nents >= PRD_ENTRIES)
+			return 0;
+
 		while ((bh = bh->b_reqnext) != NULL) {
 			if ((virt_addr + size) != (unsigned char *) bh->b_data)
 				break;

@@ -259,6 +262,9 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
 	HWIF(drive)->sg_nents = i = ide_build_sglist(HWIF(drive), HWGROUP(drive)->rq);

+	if (!i)
+		return 0;
+
 	sg = HWIF(drive)->sg_table;
 	while (i && sg_dma_len(sg)) {
 		u32 cur_addr;

@@ -274,7 +280,7 @@ int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
 		 */
 		while (cur_len) {
-			if (++count >= PRD_ENTRIES) {
+			if (count++ >= PRD_ENTRIES) {
 				printk("%s: DMA table too small\n", drive->name);
 				pci_unmap_sg(HWIF(drive)->pci_dev, HWIF(drive)->sg_table,
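
The last hunk above is a classic off-by-one: with ++count the guard fires while the PRD table still has one unused slot, so only PRD_ENTRIES - 1 entries could ever be filled; the post-increment form allows all PRD_ENTRIES. A self-contained demonstration of the difference (user-space, illustrative constant only, not kernel code):

#include <stdio.h>

#define PRD_ENTRIES 4

/* Fill a toy table until the guard fires, using either the old
 * pre-increment test or the fixed post-increment test. */
static int fill(int pre_increment)
{
	int count = 0, filled = 0, i;

	for (i = 0; i < 10; i++) {
		int full = pre_increment ? ++count >= PRD_ENTRIES
					 : count++ >= PRD_ENTRIES;
		if (full)
			break;
		filled++;
	}
	return filled;
}

int main(void)
{
	/* prints "++count: 3 entries, count++: 4 entries" */
	printf("++count: %d entries, count++: %d entries\n", fill(1), fill(0));
	return 0;
}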
==== drivers/ide/ide-probe.c ====

@@ -134,7 +134,7 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
 				break;
 			}
 #endif
-			printk ("CDROM");
+			printk ("CD/DVD-ROM");
 			break;
 		case ide_tape:
 			printk ("TAPE");

@@ -761,9 +761,10 @@ static void init_gendisk (ide_hwif_t *hwif)
 	for (unit = 0; unit < minors; ++unit) {
 		*bs++ = BLOCK_SIZE;
 #ifdef CONFIG_BLK_DEV_PDC4030
-		*max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : MAX_SECTORS);
+		*max_sect++ = ((hwif->chipset == ide_pdc4030) ? 127 : 256);
 #else
-		*max_sect++ = MAX_SECTORS;
+		/* IDE can do up to 128K per request. */
+		*max_sect++ = 256;
 #endif
 		*max_ra++ = MAX_READAHEAD;
 	}
==== drivers/isdn/isdn_v110.c ====

@@ -102,7 +102,7 @@ isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
 	int i;
 	isdn_v110_stream *v;

-	if ((v = kmalloc(sizeof(isdn_v110_stream), GFP_KERNEL)) == NULL)
+	if ((v = kmalloc(sizeof(isdn_v110_stream), GFP_ATOMIC)) == NULL)
 		return NULL;
 	memset(v, 0, sizeof(isdn_v110_stream));
 	v->key = key;

@@ -134,7 +134,7 @@ isdn_v110_open(unsigned char key, int hdrlen, int maxsize)
 	v->b = 0;
 	v->skbres = hdrlen;
 	v->maxsize = maxsize - hdrlen;
-	if ((v->encodebuf = kmalloc(maxsize, GFP_KERNEL)) == NULL) {
+	if ((v->encodebuf = kmalloc(maxsize, GFP_ATOMIC)) == NULL) {
 		kfree(v);
 		return NULL;
 	}
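
This two-line change is the "ppp UP deadlock attack fix" named in the commit message: isdn_v110_open() can be reached from atomic context, where a GFP_KERNEL allocation may sleep waiting for memory reclaim and wedge the machine, while GFP_ATOMIC never sleeps (it may fail instead, which both call sites already handle). A minimal sketch of the underlying rule, assuming 2.4-era kernel headers; the helper name is invented for illustration and is not part of this patch:

#include <linux/slab.h>
#include <linux/interrupt.h>

/* Hypothetical helper: choose an allocation flag to match the calling
 * context. GFP_KERNEL may block to reclaim memory, so it is only legal
 * where sleeping is allowed; anything reachable from interrupt or
 * softirq context must use GFP_ATOMIC and cope with a NULL return. */
static void *alloc_for_context(size_t len)
{
	if (in_interrupt())
		return kmalloc(len, GFP_ATOMIC);	/* may fail, never sleeps */
	return kmalloc(len, GFP_KERNEL);		/* may sleep */
}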
==== drivers/s390/block/dasd.c ====

@@ -951,7 +951,6 @@ do_dasd_request (request_queue_t *queue)
 	dasd_debug ((unsigned long) __builtin_return_address(0));
 	go = 1;
 	while (go && !list_empty(&queue->queue_head)) {
 		req = blkdev_entry_next_request(&queue->queue_head);
-		req = blkdev_entry_next_request(&queue->queue_head);
 		di = DEVICE_NR (req->rq_dev);
 		dasd_debug ((unsigned long) req);	/* req */
==== drivers/scsi/constants.c ====

@@ -776,7 +776,7 @@ void print_sense_internal(const char * devclass,
 	printk("%s%s: sns = %2x %2x\n", devclass,
 	       kdevname(dev), sense_buffer[0], sense_buffer[2]);
-	printk("Non-extended sense class %d code 0x%0x ", sense_class, code);
+	printk("Non-extended sense class %d code 0x%0x\n", sense_class, code);
 	s = 4;
 }
==== drivers/scsi/scsi_lib.c ====

@@ -50,6 +50,50 @@
 * This entire source file deals with the new queueing code.
 */

+/*
+ * Function:    __scsi_insert_special()
+ *
+ * Purpose:     worker for scsi_insert_special_*()
+ *
+ * Arguments:   q - request queue where request should be inserted
+ *              rq - request to be inserted
+ *              data - private data
+ *              at_head - insert request at head or tail of queue
+ *
+ * Lock status: Assumed that io_request_lock is not held upon entry.
+ *
+ * Returns:     Nothing
+ */
+static void __scsi_insert_special(request_queue_t *q, struct request *rq,
+				  void *data, int at_head)
+{
+	unsigned long flags;
+
+	ASSERT_LOCK(&io_request_lock, 0);
+
+	rq->cmd = SPECIAL;
+	rq->special = data;
+	rq->q = NULL;
+	rq->nr_segments = 0;
+	rq->elevator_sequence = 0;
+
+	/*
+	 * We have the option of inserting the head or the tail of the queue.
+	 * Typically we use the tail for new ioctls and so forth.  We use the
+	 * head of the queue for things like a QUEUE_FULL message from a
+	 * device, or a host that is unable to accept a particular command.
+	 */
+	spin_lock_irqsave(&io_request_lock, flags);
+
+	if (at_head)
+		list_add(&rq->queue, &q->queue_head);
+	else
+		list_add_tail(&rq->queue, &q->queue_head);
+
+	q->request_fn(q);
+	spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
 /*
 * Function:    scsi_insert_special_cmd()

@@ -73,52 +117,9 @@
 */
 int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
 {
-	unsigned long flags;
-	request_queue_t *q;
-
-	ASSERT_LOCK(&io_request_lock, 0);
-
-	/*
-	 * The SCpnt already contains a request structure - we will doctor the
-	 * thing up with the appropriate values and use that in the actual
-	 * request queue.
-	 */
-	q = &SCpnt->device->request_queue;
-	SCpnt->request.cmd = SPECIAL;
-	SCpnt->request.special = (void *) SCpnt;
-	SCpnt->request.q = NULL;
-	SCpnt->request.free_list = NULL;
-	SCpnt->request.nr_segments = 0;
-
-	/*
-	 * We have the option of inserting the head or the tail of the queue.
-	 * Typically we use the tail for new ioctls and so forth.  We use the
-	 * head of the queue for things like a QUEUE_FULL message from a
-	 * device, or a host that is unable to accept a particular command.
-	 */
-	spin_lock_irqsave(&io_request_lock, flags);
-
-	if (at_head) {
-		list_add(&SCpnt->request.queue, &q->queue_head);
-	} else {
-		/*
-		 * FIXME(eric) - we always insert at the tail of the
-		 * list.  Otherwise ioctl commands would always take
-		 * precedence over normal I/O.  An ioctl on a busy
-		 * disk might be delayed indefinitely because the
-		 * request might not float high enough in the queue
-		 * to be scheduled.
-		 */
-		list_add_tail(&SCpnt->request.queue, &q->queue_head);
-	}
+	request_queue_t *q = &SCpnt->device->request_queue;

-	/*
-	 * Now hit the requeue function for the queue.  If the host is
-	 * already busy, so be it - we have nothing special to do.  If
-	 * the host can queue it, then send it off.
-	 */
-	q->request_fn(q);
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	__scsi_insert_special(q, &SCpnt->request, SCpnt, at_head);
 	return 0;
 }

@@ -144,51 +145,9 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
 */
 int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
 {
-	unsigned long flags;
-	request_queue_t *q;
-
-	ASSERT_LOCK(&io_request_lock, 0);
-
-	/*
-	 * The SCpnt already contains a request structure - we will doctor the
-	 * thing up with the appropriate values and use that in the actual
-	 * request queue.
-	 */
-	q = &SRpnt->sr_device->request_queue;
-	SRpnt->sr_request.cmd = SPECIAL;
-	SRpnt->sr_request.special = (void *) SRpnt;
-	SRpnt->sr_request.q = NULL;
-	SRpnt->sr_request.nr_segments = 0;
-
-	/*
-	 * We have the option of inserting the head or the tail of the queue.
-	 * Typically we use the tail for new ioctls and so forth.  We use the
-	 * head of the queue for things like a QUEUE_FULL message from a
-	 * device, or a host that is unable to accept a particular command.
-	 */
-	spin_lock_irqsave(&io_request_lock, flags);
-
-	if (at_head) {
-		list_add(&SRpnt->sr_request.queue, &q->queue_head);
-	} else {
-		/*
-		 * FIXME(eric) - we always insert at the tail of the
-		 * list.  Otherwise ioctl commands would always take
-		 * precedence over normal I/O.  An ioctl on a busy
-		 * disk might be delayed indefinitely because the
-		 * request might not float high enough in the queue
-		 * to be scheduled.
-		 */
-		list_add_tail(&SRpnt->sr_request.queue, &q->queue_head);
-	}
+	request_queue_t *q = &SRpnt->sr_device->request_queue;

-	/*
-	 * Now hit the requeue function for the queue.  If the host is
-	 * already busy, so be it - we have nothing special to do.  If
-	 * the host can queue it, then send it off.
-	 */
-	q->request_fn(q);
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	__scsi_insert_special(q, &SRpnt->sr_request, SRpnt, at_head);
 	return 0;
 }

@@ -862,17 +821,6 @@ void scsi_request_fn(request_queue_t * q)
 	}
 	SHpnt = SDpnt->host;

-	/*
-	 * If the host for this device is in error recovery mode, don't
-	 * do anything at all here.  When the host leaves error recovery
-	 * mode, it will automatically restart things and start queueing
-	 * commands again.  Same goes if the queue is actually plugged,
-	 * if the device itself is blocked, or if the host is fully
-	 * occupied.
-	 */
-	if (SHpnt->in_recovery || q->plugged)
-		return;
-
 	/*
 	 * To start with, we keep looping until the queue is empty, or until
 	 * the host is no longer able to accept any more requests.

@@ -896,10 +844,11 @@ void scsi_request_fn(request_queue_t * q)
 		    || (SHpnt->host_blocked)
 		    || (SHpnt->host_self_blocked)) {
 			/*
-			 * If we are unable to process any commands at all for this
-			 * device, then we consider it to be starved.  What this means
-			 * is that there are no outstanding commands for this device
-			 * and hence we need a little help getting it started again
+			 * If we are unable to process any commands at all for
+			 * this device, then we consider it to be starved.
+			 * What this means is that there are no outstanding
+			 * commands for this device and hence we need a
+			 * little help getting it started again
 			 * once the host isn't quite so busy.
 			 */
 			if (SDpnt->device_busy == 0) {

@@ -1000,8 +949,8 @@ void scsi_request_fn(request_queue_t * q)
 			}
 			/*
 			 * If so, we are ready to do something.  Bump the count
-			 * while the queue is locked and then break out of the loop.
-			 * Otherwise loop around and try another request.
+			 * while the queue is locked and then break out of the
+			 * loop.  Otherwise loop around and try another request.
 			 */
 			if (!SCpnt) {
 				break;

@@ -1029,8 +978,9 @@ void scsi_request_fn(request_queue_t * q)
 			memcpy(&SCpnt->request, req, sizeof(struct request));

 			/*
-			 * We have copied the data out of the request block - it is now in
-			 * a field in SCpnt.  Release the request block.
+			 * We have copied the data out of the request block -
+			 * it is now in a field in SCpnt.  Release the request
+			 * block.
 			 */
 			blkdev_release_request(req);
 		}

@@ -1047,12 +997,14 @@ void scsi_request_fn(request_queue_t * q)
 		/*
 		 * This will do a couple of things:
 		 *  1) Fill in the actual SCSI command.
-		 *  2) Fill in any other upper-level specific fields (timeout).
+		 *  2) Fill in any other upper-level specific fields
+		 * (timeout).
 		 *
-		 * If this returns 0, it means that the request failed (reading
-		 * past end of disk, reading offline device, etc).   This won't
-		 * actually talk to the device, but some kinds of consistency
-		 * checking may cause the request to be rejected immediately.
+		 * If this returns 0, it means that the request failed
+		 * (reading past end of disk, reading offline device,
+		 * etc).   This won't actually talk to the device, but
+		 * some kinds of consistency checking may cause the
+		 * request to be rejected immediately.
 		 */
 		if (STpnt == NULL) {
 			STpnt = scsi_get_request_dev(req);

@@ -1103,8 +1055,8 @@ void scsi_request_fn(request_queue_t * q)
 		scsi_dispatch_cmd(SCpnt);

 		/*
-		 * Now we need to grab the lock again.  We are about to mess with
-		 * the request queue and try to find another command.
+		 * Now we need to grab the lock again.  We are about to mess
+		 * with the request queue and try to find another command.
 		 */
 		spin_lock_irq(&io_request_lock);
 	}
==== drivers/scsi/scsi_merge.c ====

@@ -324,7 +324,6 @@ static inline int scsi_new_mergeable(request_queue_t * q,
 	    req->nr_segments >= SHpnt->sg_tablesize)
 		return 0;
 	req->nr_segments++;
-	q->elevator.nr_segments++;
 	return 1;
 }

@@ -341,11 +340,8 @@ static inline int scsi_new_segment(request_queue_t * q,
 	if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
 	    req->nr_segments >= SHpnt->sg_tablesize)
 		return 0;
-	if (req->nr_segments >= max_segments)
-		return 0;
 	req->nr_hw_segments++;
 	req->nr_segments++;
-	q->elevator.nr_segments++;
 	return 1;
 }
 #else

@@ -361,7 +357,6 @@ static inline int scsi_new_segment(request_queue_t * q,
 		 * counter.
 		 */
 		req->nr_segments++;
-		q->elevator.nr_segments++;
 		return 1;
 	} else {
 		return 0;

@@ -417,8 +412,10 @@ __inline static int __scsi_back_merge_fn(request_queue_t * q,
 	SDpnt = (Scsi_Device *) q->queuedata;
 	SHpnt = SDpnt->host;

+#ifdef DMA_CHUNK_SIZE
 	if (max_segments > 64)
 		max_segments = 64;
+#endif

 	if (use_clustering) {
 		/*

@@ -471,8 +468,10 @@ __inline static int __scsi_front_merge_fn(request_queue_t * q,
 	SDpnt = (Scsi_Device *) q->queuedata;
 	SHpnt = SDpnt->host;

+#ifdef DMA_CHUNK_SIZE
 	if (max_segments > 64)
 		max_segments = 64;
+#endif

 	if (use_clustering) {
 		/*

@@ -601,10 +600,10 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
 	SDpnt = (Scsi_Device *) q->queuedata;
 	SHpnt = SDpnt->host;

+#ifdef DMA_CHUNK_SIZE
 	if (max_segments > 64)
 		max_segments = 64;

-#ifdef DMA_CHUNK_SIZE
 	/* If it would not fit into prepared memory space for sg chain,
 	 * then we don't allow the merge.
 	 */

@@ -664,7 +663,6 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
 	 * This one is OK.  Let it go.
 	 */
 	req->nr_segments += next->nr_segments - 1;
-	q->elevator.nr_segments--;
 #ifdef DMA_CHUNK_SIZE
 	req->nr_hw_segments += next->nr_hw_segments - 1;
 #endif
==== drivers/scsi/sg.c ====

@@ -694,6 +694,7 @@ static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
 		      (void *) SRpnt->sr_buffer, hp->dxfer_len,
 		      sg_cmd_done_bh, timeout, SG_DEFAULT_RETRIES);
 	/* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
+	generic_unplug_device(&SRpnt->sr_device->request_queue);
 	return 0;
 }
==== drivers/scsi/sr.c ====

@@ -671,12 +671,14 @@ void get_capabilities(int i)
 	cmd[3] = cmd[5] = 0;
 	rc = sr_do_ioctl(i, cmd, buffer, 128, 1, SCSI_DATA_READ, NULL);

-	if (-EINVAL == rc) {
-		/* failed, drive has'nt this mode page */
+	if (rc) {
+		/* failed, drive doesn't have capabilities mode page */
 		scsi_CDs[i].cdi.speed = 1;
 		/* disable speed select, drive probably can't do this either */
-		scsi_CDs[i].cdi.mask |= CDC_SELECT_SPEED;
+		scsi_CDs[i].cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
+					 CDC_DVD | CDC_DVD_RAM |
+					 CDC_SELECT_DISC | CDC_SELECT_SPEED);
+		scsi_free(buffer, 512);
+		printk("sr%i: scsi-1 drive\n");
 		return;
 	}
 	n = buffer[3] + 4;
==== fs/reiserfs/bitmap.c ====

@@ -3,6 +3,7 @@
 */
 #ifdef __KERNEL__
 #include <linux/config.h>
+#include <linux/sched.h>
 #include <linux/reiserfs_fs.h>
 #include <linux/locks.h>
==== fs/reiserfs/buffer2.c ====

@@ -12,6 +12,7 @@
 */
 #ifdef __KERNEL__
 #include <linux/config.h>
+#include <linux/sched.h>
 #include <linux/locks.h>
 #include <linux/reiserfs_fs.h>
==== fs/reiserfs/dir.c ====

@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
 #include <linux/string.h>
+#include <linux/errno.h>
 #include <linux/fs.h>

@@ -142,6 +143,10 @@ static int reiserfs_readdir (struct file * filp, void * dirent, filldir_t filldi
 		if (!d_name[d_reclen - 1])
 		    d_reclen = strlen (d_name);
+
+		if (d_reclen > REISERFS_MAX_NAME_LEN(inode->i_sb->s_blocksize)){
+		    /* too big to send back to VFS */
+		    continue;
+		}
 		d_off = deh_offset (deh);
 		filp->f_pos = d_off;
 		d_ino = deh_objectid (deh);
==== fs/reiserfs/do_balan.c ====

@@ -18,6 +18,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
 #include <asm/uaccess.h>
+#include <linux/sched.h>
 #include <linux/reiserfs_fs.h>
==== fs/reiserfs/file.c ====

@@ -64,7 +64,7 @@ static int reiserfs_file_release (struct inode * inode, struct file * filp)
     item(s) had to be converted, then it may have to be
     indirect2direct converted */
     windex = push_journal_writer("file_release") ;
-    reiserfs_truncate_file(inode) ;
+    reiserfs_truncate_file(inode, 0) ;
     pop_journal_writer(windex) ;
   }
   up (&inode->i_sem);

@@ -72,6 +72,9 @@ static int reiserfs_file_release (struct inode * inode, struct file * filp)
   return 0 ;
 }

+static void reiserfs_vfs_truncate_file(struct inode *inode) {
+    reiserfs_truncate_file(inode, 1) ;
+}
+
 /* Sync a reiserfs file. */
 static int reiserfs_sync_file(

@@ -115,7 +118,7 @@ struct file_operations reiserfs_file_operations = {

 struct inode_operations reiserfs_file_inode_operations = {
-    truncate:	reiserfs_truncate_file,
+    truncate:	reiserfs_vfs_truncate_file,
 };
==== fs/reiserfs/fix_node.c ====

@@ -37,6 +37,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
+#include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/locks.h>
==== fs/reiserfs/ibalance.c ====

@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
 #include <asm/uaccess.h>
 #include <linux/string.h>
+#include <linux/sched.h>
==== fs/reiserfs/inode.c ====

@@ -3,6 +3,7 @@
 */
 #ifdef __KERNEL__
 #include <linux/config.h>
+#include <linux/sched.h>
 #include <linux/reiserfs_fs.h>
 #include <linux/locks.h>

@@ -1538,7 +1539,7 @@ static int grab_tail_page(struct inode *p_s_inode,
 **
 ** some code taken from block_truncate_page
 */
-void reiserfs_truncate_file(struct inode *p_s_inode) {
+void reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps) {
     struct reiserfs_transaction_handle th ;
     int windex ;

@@ -1571,7 +1572,7 @@ void reiserfs_truncate_file(struct inode *p_s_inode) {
     prevent_flush_page_lock(page, p_s_inode) ;
     journal_begin(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
     windex = push_journal_writer("reiserfs_vfs_truncate_file") ;
-    reiserfs_do_truncate (&th, p_s_inode, page, 1/*update timestamps*/) ;
+    reiserfs_do_truncate (&th, p_s_inode, page, update_timestamps) ;
     pop_journal_writer(windex) ;
     journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
     allow_flush_page_lock(page, p_s_inode) ;
==== fs/reiserfs/journal.c ====

@@ -43,6 +43,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 #include <asm/system.h>

@@ -415,7 +416,7 @@ inline int mark_buffer_not_journaled(struct buffer_head *bh) {
 ** kernel lock held.  caller is the string printed just before calling BUG()
 */
 void reiserfs_check_lock_depth(char *caller) {
-#ifdef __SMP__
+#ifdef CONFIG_SMP
   if (current->lock_depth < 0) {
     printk("%s called without kernel lock held\n", caller) ;
     show_reiserfs_locks() ;

@@ -865,14 +866,20 @@ static int flush_older_journal_lists(struct super_block *p_s_sb, struct reiserfs
   return 0 ;
 }

-static void submit_logged_buffer(struct buffer_head *bh) {
-  mark_buffer_notjournal_new(bh) ;
-  set_bit(BH_Dirty, &bh->b_state) ;
-  ll_rw_block(WRITE, 1, &bh) ;
+static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
+    if (buffer_journaled(bh)) {
+        reiserfs_warning("clm-2084: pinned buffer %u:%s sent to disk\n",
+                         bh->b_blocknr, kdevname(bh->b_dev)) ;
+    }
+    mark_buffer_uptodate(bh, uptodate) ;
+    unlock_buffer(bh) ;
+}
+
+static void submit_logged_buffer(struct buffer_head *bh) {
+    lock_buffer(bh) ;
+    bh->b_end_io = reiserfs_end_buffer_io_sync ;
+    mark_buffer_notjournal_new(bh) ;
+    clear_bit(BH_Dirty, &bh->b_state) ;
+    submit_bh(WRITE, bh) ;
 }

 /* flush a journal list, both commit and real blocks
==== fs/reiserfs/lbalance.c ====

@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
 #include <asm/uaccess.h>
 #include <linux/string.h>
+#include <linux/sched.h>
==== fs/reiserfs/namei.c ====

@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
+#include <linux/sched.h>
 #include <linux/reiserfs_fs.h>
 #include <linux/smp_lock.h>
==== fs/reiserfs/objectid.c ====

@@ -3,6 +3,7 @@
 */
 #ifdef __KERNEL__
 #include <linux/config.h>
 #include <linux/string.h>
 #include <linux/locks.h>
+#include <linux/sched.h>
==== fs/reiserfs/prints.c ====

@@ -3,7 +3,7 @@
 */
 #ifdef __KERNEL__
-#include <stdarg.h>
+#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/reiserfs_fs.h>

@@ -12,11 +12,11 @@
 #else
 #include "nokernel.h"
-#include <stdarg.h>
 #include <limits.h>
 #endif

+#include <stdarg.h>

 static char error_buf[1024];
 static char fmt_buf[1024];
==== fs/reiserfs/stree.c ====

@@ -54,6 +54,7 @@
 */
 #ifdef __KERNEL__
 #include <linux/config.h>
+#include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/locks.h>
==== fs/reiserfs/super.c ====

@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
==== fs/reiserfs/tail_conversion.c ====

@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
+#include <linux/sched.h>
 #include <linux/pagemap.h>
 #include <linux/reiserfs_fs.h>
==== include/linux/blk.h ====

@@ -87,10 +87,6 @@ void initrd_init(void);
 static inline void blkdev_dequeue_request(struct request *req)
 {
-	if (req->e) {
-		req->e->dequeue_fn(req);
-		req->e = NULL;
-	}
 	list_del(&req->queue);
 }
==== include/linux/blkdev.h ====

@@ -23,8 +23,6 @@ struct request {
 	int elevator_sequence;
 	struct list_head table;

-	struct list_head *free_list;
-
 	volatile int rq_status;	/* should split this into a few status bits */
 #define RQ_INACTIVE		(-1)
 #define RQ_ACTIVE		1

@@ -47,7 +45,6 @@ struct request {
 	struct buffer_head * bh;
 	struct buffer_head * bhtail;
 	request_queue_t *q;
-	elevator_t *e;
 };

 #include <linux/elevator.h>

@@ -69,7 +66,7 @@ typedef void (unplug_device_fn) (void *q);
 /*
 * Default nr free requests per queue
 */
-#define QUEUE_NR_REQUESTS	256
+#define QUEUE_NR_REQUESTS	512

 struct request_queue
 {

@@ -77,6 +74,8 @@ struct request_queue
 	 * the queue request freelist, one for reads and one for writes
 	 */
 	struct list_head	request_freelist[2];
+	struct list_head	pending_freelist[2];
+	int			pending_free[2];

 	/*
 	 * Together with queue_head for cacheline sharing

@@ -116,7 +115,7 @@ struct request_queue
 	 * Is meant to protect the queue in the future instead of
 	 * io_request_lock
 	 */
-	spinlock_t		request_lock;
+	spinlock_t		queue_lock;

 	/*
 	 * Tasks wait here for free request

@@ -152,6 +151,7 @@ extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, lon
 extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
 extern void generic_make_request(int rw, struct buffer_head * bh);
 extern request_queue_t *blk_get_queue(kdev_t dev);
+extern inline request_queue_t *__blk_get_queue(kdev_t dev);
 extern void blkdev_release_request(struct request *);

 /*

@@ -162,6 +162,7 @@ extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_headactive(request_queue_t *, int);
 extern void blk_queue_pluggable(request_queue_t *, plug_device_fn *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
+extern void generic_unplug_device(void *);
 extern int * blk_size[MAX_BLKDEV];

@@ -175,9 +176,8 @@ extern int * max_sectors[MAX_BLKDEV];
 extern int * max_segments[MAX_BLKDEV];

-#define MAX_SECTORS 254
-
-#define MAX_SEGMENTS MAX_SECTORS
+#define MAX_SEGMENTS 128
+#define MAX_SECTORS (MAX_SEGMENTS*8)

 #define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)
==== include/linux/elevator.h ====

@@ -7,34 +7,32 @@
-typedef void (elevator_fn) (struct request *, elevator_t *,
-			    struct list_head *,
-			    struct list_head *, int);
-
-typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
-				 struct buffer_head *, int, int *, int *);
-
-typedef void (elevator_dequeue_fn) (struct request *);
+typedef int (elevator_merge_fn) (request_queue_t *, struct request **, struct list_head *,
+				 struct buffer_head *, int, int, int);
+
+typedef void (elevator_merge_cleanup_fn) (request_queue_t *, struct request *, int);
+
+typedef void (elevator_merge_req_fn) (struct request *, struct request *);

 struct elevator_s
 {
-	int sequence;
-
 	int read_latency;
 	int write_latency;
-	int max_bomb_segments;
-
-	unsigned int nr_segments;
-	int read_pendings;
-
-	elevator_fn * elevator_fn;
 	elevator_merge_fn *elevator_merge_fn;
-	elevator_dequeue_fn *dequeue_fn;
+	elevator_merge_cleanup_fn *elevator_merge_cleanup_fn;
+	elevator_merge_req_fn *elevator_merge_req_fn;

 	unsigned int queue_ID;
 };

-void elevator_noop(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
-int elevator_noop_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *);
-void elevator_noop_dequeue(struct request *);
-void elevator_linus(struct request *, elevator_t *, struct list_head *, struct list_head *, int);
-int elevator_linus_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *);
+int elevator_noop_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int);
+void elevator_noop_merge_cleanup(request_queue_t *, struct request *, int);
+void elevator_noop_merge_req(struct request *, struct request *);
+
+int elevator_linus_merge(request_queue_t *, struct request **, struct list_head *, struct buffer_head *, int, int, int);
+void elevator_linus_merge_cleanup(request_queue_t *, struct request *, int);
+void elevator_linus_merge_req(struct request *, struct request *);

 typedef struct blkelv_ioctl_arg_s {
 	int queue_ID;

@@ -69,6 +67,10 @@ extern void elevator_init(elevator_t *, elevator_t);
 	(s1)->sector < (s2)->sector)) ||	\
 	(s1)->rq_dev < (s2)->rq_dev)

+#define BHRQ_IN_ORDER(bh, rq)			\
+	(((bh)->b_rdev == (rq)->rq_dev &&	\
+	  (bh)->b_rsector < (rq)->sector))
+
 static inline int elevator_request_latency(elevator_t * elevator, int rw)
 {
 	int latency;

@@ -80,36 +82,24 @@ static inline int elevator_request_latency(elevator_t * elevator, int rw)
 	return latency;
 }

 #define ELEVATOR_NOOP							\
 ((elevator_t) {								\
-	0,				/* sequence */			\
-									\
 	0,				/* read_latency */		\
 	0,				/* write_latency */		\
-	0,				/* max_bomb_segments */		\
-									\
-	0,				/* nr_segments */		\
-	0,				/* read_pendings */		\
 									\
-	elevator_noop,			/* elevator_fn */		\
 	elevator_noop_merge,		/* elevator_merge_fn */		\
-	elevator_noop_dequeue,		/* dequeue_fn */		\
+	elevator_noop_merge_cleanup,	/* elevator_merge_cleanup_fn */	\
+	elevator_noop_merge_req,	/* elevator_merge_req_fn */	\
 	})

 #define ELEVATOR_LINUS							\
 ((elevator_t) {								\
-	0,				/* not used */			\
-									\
-	1000000,			/* read passovers */		\
-	2000000,			/* write passovers */		\
-	0,				/* max_bomb_segments */		\
-									\
-	0,				/* not used */			\
-	0,				/* not used */			\
+	8192,				/* read passovers */		\
+	16384,				/* write passovers */		\
 									\
-	elevator_linus,			/* elevator_fn */		\
 	elevator_linus_merge,		/* elevator_merge_fn */		\
-	elevator_noop_dequeue,		/* dequeue_fn */		\
+	elevator_linus_merge_cleanup,	/* elevator_merge_cleanup_fn */	\
+	elevator_linus_merge_req,	/* elevator_merge_req_fn */	\
 	})

 #endif
==== include/linux/reiserfs_fs.h ====

@@ -926,8 +926,7 @@ extern inline int entry_length (struct buffer_head * bh, struct item_head * ih,
 //((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE * 2) / 2)
 // two entries per block (at least)
-#define REISERFS_MAX_NAME_LEN(block_size) \
-((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE))
+#define REISERFS_MAX_NAME_LEN(block_size) 255

@@ -1753,7 +1752,6 @@ void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
 			   struct inode * p_s_inode, struct page *, int update_timestamps);
-//void reiserfs_vfs_truncate_file (struct inode * p_s_inode);
 //void lock_inode_to_convert (struct inode * p_s_inode);
 //void unlock_inode_after_convert (struct inode * p_s_inode);
 //void increment_i_read_sync_counter (struct inode * p_s_inode);

@@ -1792,7 +1790,7 @@ void padd_item (char * item, int total_length, int length);
 /* inode.c */
 int reiserfs_prepare_write(struct file *, struct page *, unsigned, unsigned) ;
-void reiserfs_truncate_file(struct inode *) ;
+void reiserfs_truncate_file(struct inode *, int update_timestamps) ;
 void make_cpu_key (struct cpu_key * cpu_key, const struct inode * inode, loff_t offset,
 		   int type, int key_length);
 void make_le_item_head (struct item_head * ih, struct cpu_key * key, int version,
==== include/linux/sched.h ====

@@ -543,8 +543,8 @@ extern unsigned long prof_shift;
 #define CURRENT_TIME (xtime.tv_sec)

-extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode));
-extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode));
+extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
+extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(sleep_on(wait_queue_head_t *q));
 extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout));

@@ -553,12 +553,16 @@ extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
						    signed long timeout));
 extern void FASTCALL(wake_up_process(struct task_struct * tsk));

-#define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
-#define wake_up_all(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,0)
-#define wake_up_sync(x)			__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
-#define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
-#define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE,0)
-#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE,WQ_FLAG_EXCLUSIVE)
+#define wake_up(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
+#define wake_up_nr(x, nr)		__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
+#define wake_up_all(x)			__wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
+#define wake_up_sync(x)			__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
+#define wake_up_sync_nr(x, nr)		__wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
+#define wake_up_interruptible(x)	__wake_up((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_nr(x, nr)	__wake_up((x),TASK_INTERRUPTIBLE, nr)
+#define wake_up_interruptible_all(x)	__wake_up((x),TASK_INTERRUPTIBLE, 0)
+#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync_nr(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
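
In the hunk above, the wait-queue flag argument (WQ_FLAG_EXCLUSIVE) is replaced by a plain count: 1 preserves the old behaviour of waking a single exclusive waiter, 0 wakes everyone, and the new _nr variants wake up to nr exclusive waiters — which is what blkdev_release_request() uses for its batched wakeups. (As shipped, wake_up_interruptible_sync_nr(x) omits nr from its parameter list; that appears to be a typo in the original patch and is reproduced verbatim above.) A toy user-space model of the counting loop that kernel/sched.c implements below; names here are invented, and non-exclusive waiters do not consume the budget:

#include <stdio.h>

#define WQ_FLAG_EXCLUSIVE 1

struct waiter { int flags; };

static void toy_wake_up_nr(struct waiter *w, int n, int nr_exclusive)
{
	int i;

	for (i = 0; i < n; i++) {
		printf("waking waiter %d\n", i);
		/* only exclusive waiters consume the budget; passing 0
		 * wakes everybody, since the pre-decrement then never
		 * lands on zero */
		if ((w[i].flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

int main(void)
{
	struct waiter w[4] = { {1}, {0}, {1}, {1} };

	toy_wake_up_nr(w, 4, 2);	/* wakes w[0], w[1], w[2], then stops */
	return 0;
}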
==== kernel/sched.c ====

@@ -690,19 +690,15 @@ asmlinkage void schedule(void)
 }

 static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
-				     unsigned int wq_mode, const int sync)
+				     int nr_exclusive, const int sync)
 {
 	struct list_head *tmp, *head;
-	struct task_struct *p, *best_exclusive;
+	struct task_struct *p;
 	unsigned long flags;
-	int best_cpu, irq;

 	if (!q)
 		goto out;

-	best_cpu = smp_processor_id();
-	irq = in_interrupt();
-	best_exclusive = NULL;
 	wq_write_lock_irqsave(&q->lock, flags);

 #if WAITQUEUE_DEBUG

@@ -730,47 +726,27 @@ static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
 #if WAITQUEUE_DEBUG
 			curr->__waker = (long)__builtin_return_address(0);
 #endif
-			/*
-			 * If waking up from an interrupt context then
-			 * prefer processes which are affine to this
-			 * CPU.
-			 */
-			if (irq && (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)) {
-				if (!best_exclusive)
-					best_exclusive = p;
-				if (p->processor == best_cpu) {
-					best_exclusive = p;
-					break;
-				}
-			} else {
-				if (sync)
-					wake_up_process_synchronous(p);
-				else
-					wake_up_process(p);
-				if (curr->flags & wq_mode & WQ_FLAG_EXCLUSIVE)
-					break;
-			}
+			if (sync)
+				wake_up_process_synchronous(p);
+			else
+				wake_up_process(p);
+			if ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+				break;
 		}
 	}
-	if (best_exclusive) {
-		if (sync)
-			wake_up_process_synchronous(best_exclusive);
-		else
-			wake_up_process(best_exclusive);
-	}
 	wq_write_unlock_irqrestore(&q->lock, flags);
 out:
 	return;
 }

-void __wake_up(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
+void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
 {
-	__wake_up_common(q, mode, wq_mode, 0);
+	__wake_up_common(q, mode, nr, 0);
 }

-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, unsigned int wq_mode)
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
 {
-	__wake_up_common(q, mode, wq_mode, 1);
+	__wake_up_common(q, mode, nr, 1);
 }

 #define SLEEP_ON_VAR
==== mm/filemap.c ====

@@ -974,10 +974,6 @@ static void generic_file_readahead(int reada_ok,
 	 * accessed sequentially.
 	 */
 	if (ahead) {
-		if (reada_ok == 2) {
-			run_task_queue(&tq_disk);
-		}
-
 		filp->f_ralen += ahead;
 		filp->f_rawin += filp->f_ralen;
 		filp->f_raend = raend + ahead + 1;
==== scripts/checkconfig.pl ====

@@ -14,6 +14,7 @@ foreach $file (@ARGV)
     # Initialize variables.
     my $fInComment = 0;
+    my $fInString = 0;
     my $fUseConfig = 0;
     my $iLinuxConfig = 0;
     my %configList = ();

@@ -24,6 +25,10 @@ foreach $file (@ARGV)
	$fInComment && (s+^.*?\*/+ +o ? ($fInComment = 0) : next);
	m+/\*+o && (s+/\*.*?\*/+ +go, (s+/\*.*$+ +o && ($fInComment = 1)));

+	# Strip strings.
+	$fInString && (s+^.*?"+ +o ? ($fInString = 0) : next);
+	m+"+o && (s+".*?"+ +go, (s+".*$+ +o && ($fInString = 1)));
+
	# Pick up definitions.
	if ( m/^\s*#/o ) {