Commit 8baa8006, authored Oct 27, 2002 by Linus Torvalds

Merge http://gkernel.bkbits.net/misc-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: 27b727c8 97565ed3

Showing 22 changed files with 420 additions and 273 deletions (+420 -273):
  drivers/block/deadline-iosched.c   +6    -0
  drivers/block/elevator.c           +52   -19
  drivers/block/ll_rw_blk.c          +173  -80
  drivers/block/umem.c               +1    -6
  drivers/ide/ide-disk.c             +0    -50
  drivers/ide/ide-floppy.c           +21   -0
  drivers/ide/ide.c                  +31   -22
  drivers/md/linear.c                +8    -6
  drivers/md/raid0.c                 +2    -3
  drivers/scsi/scsi_lib.c            +2    -2
  drivers/scsi/sr_ioctl.c            +2    -4
  fs/bio.c                           +28   -18
  fs/direct-io.c                     +2    -2
  fs/mpage.c                         +6    -2
  fs/pipe.c                          +2    -2
  fs/xfs/pagebuf/page_buf.c          +1    -1
  include/asm-i386/ide.h             +1    -0
  include/linux/bio.h                +2    -1
  include/linux/blk.h                +4    -17
  include/linux/blkdev.h             +27   -1
  include/linux/elevator.h           +5    -2
  mm/highmem.c                       +44   -35
drivers/block/deadline-iosched.c  (+6 -0)

@@ -118,6 +118,8 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
 	while ((entry = next) != hash_list) {
 		next = entry->next;
+		prefetch(next);
+
 		drq = list_entry_hash(entry);
 		BUG_ON(!drq->hash_valid_count);
...
@@ -191,6 +193,8 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
 	while ((entry = entry->prev) != sort_list) {
 		__rq = list_entry_rq(entry);
+		prefetch(entry->prev);
+
 		BUG_ON(__rq->flags & REQ_STARTED);
 		if (!(__rq->flags & REQ_CMD))
...
@@ -298,6 +302,8 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
 		struct list_head *nxt = rq->queuelist.next;
 		int this_rq_cost;

+		prefetch(nxt);
+
 		/*
 		 * take it off the sort and fifo list, move
 		 * to dispatch queue
...
drivers/block/elevator.c  (+52 -19)

@@ -272,13 +272,27 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 		e->elevator_merge_req_fn(q, rq, next);
 }

-/*
- * add_request and next_request are required to be supported, naturally
- */
-void __elv_add_request(request_queue_t *q, struct request *rq,
-		       struct list_head *insert_here)
+void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+		       int plug)
 {
-	q->elevator.elevator_add_req_fn(q, rq, insert_here);
+	struct list_head *insert = &q->queue_head;
+
+	if (at_end)
+		insert = insert->prev;
+	if (plug)
+		blk_plug_device(q);
+
+	q->elevator.elevator_add_req_fn(q, rq, insert);
+}
+
+void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+		     int plug)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, at_end, plug);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }

 static inline struct request *__elv_next_request(request_queue_t *q)
...
@@ -289,8 +303,14 @@ static inline struct request *__elv_next_request(request_queue_t *q)
 struct request *elv_next_request(request_queue_t *q)
 {
 	struct request *rq;
+	int ret;

 	while ((rq = __elv_next_request(q))) {
+		/*
+		 * just mark as started even if we don't start it, a request
+		 * that has been delayed should not be passed by new incoming
+		 * requests
+		 */
 		rq->flags |= REQ_STARTED;

 		if (&rq->queuelist == q->last_merge)
...
@@ -299,20 +319,22 @@ struct request *elv_next_request(request_queue_t *q)
 		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;

-		/*
-		 * all ok, break and return it
-		 */
-		if (!q->prep_rq_fn(q, rq))
-			break;
-
-		/*
-		 * prep said no-go, kill it
-		 */
-		blkdev_dequeue_request(rq);
-		if (end_that_request_first(rq, 0, rq->nr_sectors))
-			BUG();
-
-		end_that_request_last(rq);
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
+			break;
+		} else if (ret == BLKPREP_DEFER) {
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			blkdev_dequeue_request(rq);
+			rq->flags |= REQ_QUIET;
+			while (end_that_request_first(rq, 0, rq->nr_sectors))
+				;
+			end_that_request_last(rq);
+		} else {
+			printk("%s: bad return=%d\n", __FUNCTION__, ret);
+			break;
+		}
 	}

 	return rq;
...
@@ -322,6 +344,16 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
 {
 	elevator_t *e = &q->elevator;

+	/*
+	 * the main clearing point for q->last_merge is on retrieval of
+	 * request by driver (it calls elv_next_request()), but it _can_
+	 * also happen here if a request is added to the queue but later
+	 * deleted without ever being given to driver (merged with another
+	 * request).
+	 */
+	if (&rq->queuelist == q->last_merge)
+		q->last_merge = NULL;
+
 	if (e->elevator_remove_req_fn)
 		e->elevator_remove_req_fn(q, rq);
 }
...
@@ -357,6 +389,7 @@ module_init(elevator_global_init);
 EXPORT_SYMBOL(elevator_noop);

+EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_next_request);
 EXPORT_SYMBOL(elv_remove_request);
...
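Note on the interface change above: __elv_add_request() loses its struct list_head *insert_here argument in favour of an (at_end, plug) pair, and gains a spinlock-taking wrapper, elv_add_request(). A minimal caller sketch of the new convention follows; it is illustrative, not part of this commit:

	/* no queue lock held: the wrapper takes q->queue_lock itself;
	 * insert at the tail (at_end = 1) and plug the queue */
	elv_add_request(q, rq, 1, 1);

	/* already under q->queue_lock (e.g. requeue paths): insert at
	 * the head without plugging */
	__elv_add_request(q, rq, 0, 0);

Callers that still need to insert at an arbitrary list position use the new __elv_add_request_pos() macro from include/linux/elevator.h, below.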
drivers/block/ll_rw_blk.c  (+173 -80)

@@ -242,6 +242,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.state = 0;
 	blk_queue_max_sectors(q, MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
+	blk_queue_dma_alignment(q, 511);

 	/*
 	 * by default assume old behaviour and bounce for any highmem page
...
@@ -408,6 +409,21 @@ void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
 	q->seg_boundary_mask = mask;
 }

+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q:  the request queue for the device
+ * @dma_mask:  alignment mask
+ *
+ * description:
+ *    set required memory and length aligment for direct dma transactions.
+ *    this is used when buiding direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(request_queue_t *q, int mask)
+{
+	q->dma_alignment = mask;
+}
+
 void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
 {
 	spin_lock_init(lock);
...
@@ -549,7 +565,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 		return;
 	}

-	list_del(&rq->queuelist);
+	list_del_init(&rq->queuelist);
 	rq->flags &= ~REQ_QUEUED;
 	rq->tag = -1;
...
@@ -633,13 +649,13 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 		if (rq->tag == -1) {
 			printk("bad tag found on list\n");
-			list_del(&rq->queuelist);
+			list_del_init(&rq->queuelist);
 			rq->flags &= ~REQ_QUEUED;
 		} else
 			blk_queue_end_tag(q, rq);

 		rq->flags &= ~REQ_STARTED;
-		elv_add_request(q, rq, 0);
+		__elv_add_request(q, rq, 0, 0);
 	}
 }
...
@@ -655,14 +671,19 @@ static char *rq_flags[] = {
 	"REQ_PC",
 	"REQ_BLOCK_PC",
 	"REQ_SENSE",
+	"REQ_FAILED",
+	"REQ_QUIET",
 	"REQ_SPECIAL"
+	"REQ_DRIVE_CMD",
+	"REQ_DRIVE_TASK",
+	"REQ_DRIVE_TASKFILE",
 };

 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;

-	printk("%s: dev %02x:%02x: ", msg, major(rq->rq_dev), minor(rq->rq_dev));
+	printk("%s: dev %02x:%02x: flags = ", msg, major(rq->rq_dev),
+	       minor(rq->rq_dev));
 	bit = 0;
 	do {
 		if (rq->flags & (1 << bit))
...
@@ -670,10 +691,17 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 		bit++;
 	} while (bit < __REQ_NR_BITS);

-	printk("sector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
+	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
 	       rq->nr_sectors,
 	       rq->current_nr_sectors);
-	printk("bio %p, biotail %p\n", rq->bio, rq->biotail);
+	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n",
+	       rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+
+	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+		printk("cdb: ");
+		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+			printk("%02x ", rq->cmd[bit]);
+		printk("\n");
+	}
 }

 void blk_recount_segments(request_queue_t *q, struct bio *bio)
...
@@ -1104,7 +1132,7 @@ static int __blk_cleanup_queue(struct request_list *list)
 	while (!list_empty(head)) {
 		rq = list_entry(head->next, struct request, queuelist);
-		list_del(&rq->queuelist);
+		list_del_init(&rq->queuelist);
 		kmem_cache_free(request_cachep, rq);
 		i++;
 	}
...
@@ -1264,13 +1292,20 @@ static struct request *get_request(request_queue_t *q, int rw)
 	if (!list_empty(&rl->free)) {
 		rq = blkdev_free_rq(&rl->free);
-		list_del(&rq->queuelist);
+		list_del_init(&rq->queuelist);
+		rq->ref_count = 1;
 		rl->count--;
 		if (rl->count < queue_congestion_on_threshold())
 			set_queue_congested(q, rw);
 		rq->flags = 0;
 		rq->rq_status = RQ_ACTIVE;
+		rq->errors = 0;
 		rq->special = NULL;
+		rq->buffer = NULL;
+		rq->data = NULL;
+		rq->sense = NULL;
+		rq->waiting = NULL;
+		rq->bio = rq->biotail = NULL;
 		rq->q = q;
 		rq->rl = rl;
 	}
...
@@ -1466,26 +1501,22 @@ static inline void add_request(request_queue_t * q, struct request * req,
 	 * elevator indicated where it wants this request to be
 	 * inserted at elevator_merge time
 	 */
-	__elv_add_request(q, req, insert_here);
+	__elv_add_request_pos(q, req, insert_here);
 }

-/*
- * Must be called with queue lock held and interrupts disabled
- */
-void blk_put_request(struct request *req)
+void __blk_put_request(request_queue_t *q, struct request *req)
 {
 	struct request_list *rl = req->rl;
-	request_queue_t *q = req->q;
+
+	if (unlikely(--req->ref_count))
+		return;
+	if (unlikely(!q))
+		return;

 	req->rq_status = RQ_INACTIVE;
 	req->q = NULL;
 	req->rl = NULL;

-	if (q) {
-		if (q->last_merge == &req->queuelist)
-			q->last_merge = NULL;
-	}
-
 	/*
 	 * Request may not have originated from ll_rw_blk. if not,
 	 * it didn't come out of our reserved rq pools
...
@@ -1493,6 +1524,8 @@ void blk_put_request(struct request *req)
 	if (rl) {
 		int rw = 0;

+		BUG_ON(!list_empty(&req->queuelist));
+
 		list_add(&req->queuelist, &rl->free);

 		if (rl == &q->rq[WRITE])
...
@@ -1510,6 +1543,23 @@ void blk_put_request(struct request *req)
 	}
 }

+void blk_put_request(struct request *req)
+{
+	request_queue_t *q = req->q;
+
+	/*
+	 * if req->q isn't set, this request didnt originate from the
+	 * block layer, so it's safe to just disregard it
+	 */
+	if (q) {
+		unsigned long flags;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		__blk_put_request(q, req);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
 /**
  * blk_congestion_wait - wait for a queue to become uncongested
  * @rw: READ or WRITE
...
@@ -1568,7 +1618,7 @@ static void attempt_merge(request_queue_t *q, struct request *req,
 		elv_merge_requests(q, req, next);

 		blkdev_dequeue_request(next);
-		blk_put_request(next);
+		__blk_put_request(q, next);
 	}
 }
...
@@ -1761,7 +1811,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	add_request(q, req, insert_here);
 out:
 	if (freereq)
-		blk_put_request(freereq);
+		__blk_put_request(q, freereq);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
...
@@ -1891,7 +1941,6 @@ int submit_bio(int rw, struct bio *bio)
 {
 	int count = bio_sectors(bio);

-	BUG_ON(!bio->bi_end_io);
 	BIO_BUG_ON(!bio->bi_size);
 	BIO_BUG_ON(!bio->bi_io_vec);
 	bio->bi_rw = rw;
...
@@ -1908,6 +1957,9 @@ inline void blk_recalc_rq_segments(struct request *rq)
 	struct bio *bio;
 	int nr_phys_segs, nr_hw_segs;

+	if (!rq->bio)
+		return;
+
 	rq->buffer = bio_data(rq->bio);

 	nr_phys_segs = nr_hw_segs = 0;
...
@@ -1925,7 +1977,7 @@ inline void blk_recalc_rq_segments(struct request *rq)
 inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-	if (rq->bio) {
+	if (blk_fs_request(rq)) {
 		rq->hard_sector += nsect;
 		rq->nr_sectors = rq->hard_nr_sectors -= nsect;
 		rq->sector = rq->hard_sector;
...
@@ -1944,27 +1996,19 @@ inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	}
 }

-/**
- * end_that_request_first - end I/O on one buffer.
- * @req:      the request being processed
- * @uptodate: 0 for I/O error
- * @nr_sectors: number of sectors to end I/O on
- *
- * Description:
- *     Ends I/O on a number of sectors attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
+static int __end_that_request_first(struct request *req, int uptodate,
+				    int nr_bytes)
 {
-	int total_nsect = 0, error = 0;
+	int total_bytes, bio_nbytes, error = 0, next_idx = 0;
 	struct bio *bio;

-	req->errors = 0;
+	/*
+	 * for a REQ_BLOCK_PC request, we want to carry any eventual
+	 * sense key with us all the way through
+	 */
+	if (!blk_pc_request(req))
+		req->errors = 0;
+
 	if (!uptodate) {
 		error = -EIO;
 		if (!(req->flags & REQ_QUIET))
...
@@ -1973,56 +2017,56 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
 			(unsigned long long)req->sector);
 	}

+	total_bytes = bio_nbytes = 0;
 	while ((bio = req->bio)) {
-		int new_bio = 0, nsect;
+		int nbytes;

-		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-			printk("%s: bio idx %d >= vcnt %d\n", __FUNCTION__,
-				bio->bi_idx, bio->bi_vcnt);
-			break;
-		}
-
-		BIO_BUG_ON(bio_iovec(bio)->bv_len > bio->bi_size);
-
-		/*
-		 * not a complete bvec done
-		 */
-		nsect = bio_iovec(bio)->bv_len >> 9;
-		if (unlikely(nsect > nr_sectors)) {
-			int partial = nr_sectors << 9;
-
-			bio_iovec(bio)->bv_offset += partial;
-			bio_iovec(bio)->bv_len -= partial;
-			bio_endio(bio, partial, error);
-			total_nsect += nr_sectors;
-			break;
-		}
-
-		/*
-		 * we are ending the last part of the bio, advance req pointer
-		 */
-		if ((nsect << 9) >= bio->bi_size) {
-			req->bio = bio->bi_next;
-			new_bio = 1;
-		}
-
-		bio_endio(bio, nsect << 9, error);
-
-		total_nsect += nsect;
-		nr_sectors -= nsect;
-
-		/*
-		 * if we didn't advance the req->bio pointer, advance bi_idx
-		 * to indicate we are now on the next bio_vec
-		 */
-		if (!new_bio)
-			bio->bi_idx++;
+		if (nr_bytes >= bio->bi_size) {
+			req->bio = bio->bi_next;
+			nbytes = bio->bi_size;
+			bio_endio(bio, nbytes, error);
+			next_idx = 0;
+			bio_nbytes = 0;
+		} else {
+			int idx = bio->bi_idx + next_idx;
+
+			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+				blk_dump_rq_flags(req, "__end_that");
+				printk("%s: bio idx %d >= vcnt %d\n",
+					__FUNCTION__, bio->bi_idx,
+					bio->bi_vcnt);
+				break;
+			}
+
+			nbytes = bio_iovec_idx(bio, idx)->bv_len;
+			BIO_BUG_ON(nbytes > bio->bi_size);
+
+			/*
+			 * not a complete bvec done
+			 */
+			if (unlikely(nbytes > nr_bytes)) {
+				bio_iovec(bio)->bv_offset += nr_bytes;
+				bio_iovec(bio)->bv_len -= nr_bytes;
+				bio_nbytes += nr_bytes;
+				total_bytes += nr_bytes;
+				break;
+			}
+
+			/*
+			 * advance to the next vector
+			 */
+			next_idx++;
+			bio_nbytes += nbytes;
+		}
+
+		total_bytes += nbytes;
+		nr_bytes -= nbytes;

 		if ((bio = req->bio)) {
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
-			if (unlikely(nr_sectors <= 0))
+			if (unlikely(nr_bytes <= 0))
 				break;
 		}
 	}
...
@@ -2036,17 +2080,64 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
 	/*
 	 * if the request wasn't completed, update state
 	 */
-	blk_recalc_rq_sectors(req, total_nsect);
+	if (bio_nbytes) {
+		bio_endio(bio, bio_nbytes, error);
+		req->bio->bi_idx += next_idx;
+	}
+
+	blk_recalc_rq_sectors(req, total_bytes >> 9);
 	blk_recalc_rq_segments(req);
 	return 1;
 }

+/**
+ * end_that_request_first - end I/O on a request
+ * @req:      the request being processed
+ * @uptodate: 0 for I/O error
+ * @nr_sectors: number of sectors to end I/O on
+ *
+ * Description:
+ *     Ends I/O on a number of sectors attached to @req, and sets it up
+ *     for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
+{
+	return __end_that_request_first(req, uptodate, nr_sectors << 9);
+}
+
+/**
+ * end_that_request_chunk - end I/O on a request
+ * @req:      the request being processed
+ * @uptodate: 0 for I/O error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, and sets it up
+ *     for the next range of segments (if any). Like end_that_request_first(),
+ *     but deals with bytes instead of sectors.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
+{
+	return __end_that_request_first(req, uptodate, nr_bytes);
+}
+
+/*
+ * queue lock must be held
+ */
 void end_that_request_last(struct request *req)
 {
 	if (req->waiting)
 		complete(req->waiting);

-	blk_put_request(req);
+	__blk_put_request(req->q, req);
 }

 int __init blk_dev_init(void)
...
@@ -2092,6 +2183,7 @@ int __init blk_dev_init(void)
 };

 EXPORT_SYMBOL(end_that_request_first);
+EXPORT_SYMBOL(end_that_request_chunk);
 EXPORT_SYMBOL(end_that_request_last);
 EXPORT_SYMBOL(blk_init_queue);
 EXPORT_SYMBOL(bdev_get_queue);
...
@@ -2112,6 +2204,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 EXPORT_SYMBOL(blk_queue_segment_boundary);
+EXPORT_SYMBOL(blk_queue_dma_alignment);
 EXPORT_SYMBOL(blk_rq_map_sg);
 EXPORT_SYMBOL(blk_nohighio);
 EXPORT_SYMBOL(blk_dump_rq_flags);
...
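The completion path above now counts bytes internally (__end_that_request_first() takes nr_bytes), with end_that_request_first() kept as a sector-granular wrapper and end_that_request_chunk() exported for byte-granular callers. A hedged sketch of the loop a driver would run with the new helper; the function name example_complete is hypothetical:

	/* finish 'bytes' bytes of 'rq'; once nothing is left pending,
	 * dequeue the request and run final completion (queue lock held) */
	static void example_complete(struct request *rq, int uptodate, int bytes)
	{
		if (!end_that_request_chunk(rq, uptodate, bytes)) {
			blkdev_dequeue_request(rq);
			end_that_request_last(rq);
		}
	}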
drivers/block/umem.c  (+1 -6)

@@ -548,12 +548,7 @@ static void process_page(unsigned long data)
 		return_bio = bio->bi_next;
 		bio->bi_next = NULL;

-		/* should use bio_endio(), however already cleared
-		 * BIO_UPTODATE. so set bio->bi_size = 0 manually to indicate
-		 * completely done
-		 */
-		bio->bi_size = 0;
-		bio->bi_end_io(bio, bytes, 0);
+		bio_endio(bio, bio->bi_size, 0);
 	}
 }
...
drivers/ide/ide-disk.c  (+0 -50)

@@ -1610,56 +1610,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
 #endif
 }

-static int idedisk_suspend(struct device *dev, u32 state, u32 level)
-{
-	ide_drive_t *drive = dev->driver_data;
-
-	printk("Suspending device %p\n", dev->driver_data);
-
-	/* I hope that every freeze operation from the upper levels have
-	 * already been done...
-	 */
-
-	if (level != SUSPEND_SAVE_STATE)
-		return 0;
-
-	BUG_ON(in_interrupt());
-
-	printk("Waiting for commands to finish\n");
-
-	/* wait until all commands are finished */
-	/* FIXME: waiting for spinlocks should be done instead. */
-	if (!(HWGROUP(drive)))
-		printk("No hwgroup?\n");
-	while (HWGROUP(drive)->handler)
-		yield();
-
-	/* set the drive to standby */
-	printk(KERN_INFO "suspending: %s ", drive->name);
-	if (drive->driver) {
-		if (drive->driver->standby)
-			drive->driver->standby(drive);
-	}
-	drive->blocked = 1;
-
-	while (HWGROUP(drive)->handler)
-		yield();
-
-	return 0;
-}
-
-static int idedisk_resume(struct device *dev, u32 level)
-{
-	ide_drive_t *drive = dev->driver_data;
-
-	if (level != RESUME_RESTORE_STATE)
-		return 0;
-	if (!drive->blocked)
-		panic("ide: Resume but not suspended?\n");
-
-	drive->blocked = 0;
-	return 0;
-}
-
 /* This is just a hook for the overall driver tree.
  */
...
drivers/ide/ide-floppy.c  (+21 -0)

@@ -1238,6 +1238,21 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
 	set_bit(PC_DMA_RECOMMENDED, &pc->flags);
 }

+static int idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy,
+				 idefloppy_pc_t *pc, struct request *rq)
+{
+	/*
+	 * just support eject for now, it would not be hard to make the
+	 * REQ_BLOCK_PC support fully-featured
+	 */
+	if (rq->cmd[0] != IDEFLOPPY_START_STOP_CMD)
+		return 1;
+
+	idefloppy_init_pc(pc);
+	memcpy(pc->c, rq->cmd, sizeof(pc->c));
+	return 0;
+}
+
 /*
  *	idefloppy_do_request is our request handling function.
  */
...
@@ -1280,6 +1295,12 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
 		idefloppy_create_rw_cmd(floppy, pc, rq, block);
 	} else if (rq->flags & REQ_SPECIAL) {
 		pc = (idefloppy_pc_t *) rq->buffer;
+	} else if (rq->flags & REQ_BLOCK_PC) {
+		pc = idefloppy_next_pc_storage(drive);
+		if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
+			idefloppy_do_end_request(drive, 0, 0);
+			return ide_stopped;
+		}
 	} else {
 		blk_dump_rq_flags(rq,
 			"ide-floppy: unsupported command in queue");
...
drivers/ide/ide.c  (+31 -22)

@@ -878,13 +878,12 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 	ide_startstop_t startstop;
 	unsigned long block;
-	ide_hwif_t *hwif = HWIF(drive);

 	BUG_ON(!(rq->flags & REQ_STARTED));

 #ifdef DEBUG
 	printk("%s: start_request: current=0x%08lx\n",
-		hwif->name, (unsigned long) rq);
+		HWIF(drive)->name, (unsigned long) rq);
 #endif

 	/* bail early if we've exceeded max_failures */
...
@@ -910,7 +909,7 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		block = 1;  /* redirect MBR access to EZ-Drive partn table */

 #if (DISK_RECOVERY_TIME > 0)
-	while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
+	while ((read_timer() - HWIF(drive)->last_time) < DISK_RECOVERY_TIME);
 #endif

 	SELECT_DRIVE(drive);
...
@@ -1128,9 +1127,15 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 			break;
 		}

+		/*
+		 * we know that the queue isn't empty, but this can happen
+		 * if the q->prep_rq_fn() decides to kill a request
+		 */
 		rq = elv_next_request(&drive->queue);
-		if (!rq)
+		if (!rq) {
+			hwgroup->busy = !!ata_pending_commands(drive);
 			break;
+		}

 		if (!rq->bio && ata_pending_commands(drive))
 			break;
...
@@ -1515,10 +1520,8 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
-	unsigned int major = HWIF(drive)->major;
-	request_queue_t *q = &drive->queue;
-	struct list_head *queue_head = &q->queue_head;
 	DECLARE_COMPLETION(wait);
+	int insert_end = 1, err;

 #ifdef CONFIG_BLK_DEV_PDC4030
 	if (HWIF(drive)->chipset == ide_pdc4030 && rq->buffer != NULL)
...
@@ -1540,29 +1543,35 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 	}

 	rq->rq_disk = drive->disk;
-	if (action == ide_wait)
+
+	/*
+	 * we need to hold an extra reference to request for safe inspection
+	 * after completion
+	 */
+	if (action == ide_wait) {
+		rq->ref_count++;
 		rq->waiting = &wait;
+	}
+
 	spin_lock_irqsave(&ide_lock, flags);
-	if (blk_queue_empty(q) || action == ide_preempt) {
-		if (action == ide_preempt)
-			hwgroup->rq = NULL;
-	} else {
-		if (action == ide_wait || action == ide_end) {
-			queue_head = queue_head->prev;
-		} else
-			queue_head = queue_head->next;
-	}
-	q->elevator.elevator_add_req_fn(q, rq, queue_head);
+	if (action == ide_preempt) {
+		hwgroup->rq = NULL;
+		insert_end = 0;
+	}
+	__elv_add_request(&drive->queue, rq, insert_end, 0);
 	ide_do_request(hwgroup, 0);
 	spin_unlock_irqrestore(&ide_lock, flags);
+
+	err = 0;
 	if (action == ide_wait) {
-		wait_for_completion(&wait);	/* wait for it to be serviced */
-		return rq->errors ? -EIO : 0;	/* return -EIO if errors */
+		/* wait for it to be serviced */
+		wait_for_completion(&wait);
+		/* return -EIO if errors */
+		if (rq->errors)
+			err = -EIO;
+
+		blk_put_request(rq);
 	}
-	return 0;

+	return err;
 }

 EXPORT_SYMBOL(ide_do_drive_cmd);
...
@@ -3369,7 +3378,7 @@ int ide_register_driver(ide_driver_t *driver)
 		list_del_init(&drive->list);
 		ata_attach(drive);
 	}
-	driver->gen_driver.name = driver->name;
+	driver->gen_driver.name = (char *) driver->name;
 	driver->gen_driver.bus = &ide_bus_type;
 	driver->gen_driver.remove = ide_drive_remove;
 	return driver_register(&driver->gen_driver);
...
drivers/md/linear.c  (+8 -6)

@@ -52,19 +52,21 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
  *	@bio: the buffer head that's been built up so far
  *	@biovec: the request that could be merged to it.
  *
- *	Return 1 if the merge is not permitted (because the
- *	result would cross a device boundary), 0 otherwise.
+ *	Return amount of bytes we can take at this offset
  */
 static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	dev_info_t *dev0, *dev1;
+	dev_info_t *dev0;
+	int maxsectors, bio_sectors = (bio->bi_size + biovec->bv_len) >> 9;

 	dev0 = which_dev(mddev, bio->bi_sector);
-	dev1 = which_dev(mddev, bio->bi_sector +
-			 ((bio->bi_size + biovec->bv_len - 1) >> 9));
+	maxsectors = (dev0->size << 1) - (bio->bi_sector - (dev0->offset << 1));

-	return dev0 != dev1;
+	if (bio_sectors <= maxsectors)
+		return biovec->bv_len;
+
+	return (maxsectors << 9) - bio->bi_size;
 }

 static int linear_run (mddev_t *mddev)
...
drivers/md/raid0.c  (+2 -3)

@@ -168,8 +168,7 @@ static int create_strip_zones (mddev_t *mddev)
  *	@bio: the buffer head that's been built up so far
  *	@biovec: the request that could be merged to it.
  *
- *	Return 1 if the merge is not permitted (because the
- *	result would cross a chunk boundary), 0 otherwise.
+ *	Return amount of bytes we can accept at this offset
  */
 static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 {
...
@@ -182,7 +181,7 @@ static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
 	block = bio->bi_sector >> 1;
 	bio_sz = (bio->bi_size + biovec->bv_len) >> 10;

-	return chunk_size < ((block & (chunk_size - 1)) + bio_sz);
+	return (chunk_size - ((block & (chunk_size - 1)) + bio_sz)) << 10;
 }

 static int raid0_run (mddev_t *mddev)
...
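Both RAID hooks above reflect a changed merge_bvec_fn contract: instead of returning non-zero to veto a merge, the hook now returns how many bytes it can accept at the proposed offset, and bio_add_page() (fs/bio.c, below) compares that count against the bvec length. A sketch of a conforming hook for an imaginary device; device_room_bytes() is hypothetical:

	static int example_mergeable_bvec(request_queue_t *q, struct bio *bio,
					  struct bio_vec *biovec)
	{
		/* bytes the device can still take at this bio's offset */
		int room = device_room_bytes(q, bio->bi_sector) - bio->bi_size;

		if (biovec->bv_len <= room)
			return biovec->bv_len;	/* whole bvec fits */

		return room;			/* partial fit, possibly zero */
	}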
drivers/scsi/scsi_lib.c  (+2 -2)

@@ -240,7 +240,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
 		SCpnt->request->special = (void *) SCpnt;
 		if (blk_rq_tagged(SCpnt->request))
 			blk_queue_end_tag(q, SCpnt->request);
-		_elv_add_request(q, SCpnt->request, 0, 0);
+		__elv_add_request(q, SCpnt->request, 0, 0);
 	}

 /*
...
@@ -951,7 +951,7 @@ void scsi_request_fn(request_queue_t * q)
 			SCpnt->request->flags |= REQ_SPECIAL;
 			if (blk_rq_tagged(SCpnt->request))
 				blk_queue_end_tag(q, SCpnt->request);
-			_elv_add_request(q, SCpnt->request, 0, 0);
+			__elv_add_request(q, SCpnt->request, 0, 0);
 			break;
 		}
...
drivers/scsi/sr_ioctl.c  (+2 -4)

@@ -160,13 +160,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct cdrom_generic_command *cgc)
 		if (!cgc->quiet)
 			printk(KERN_ERR "%s: CDROM (ioctl) reports ILLEGAL "
 				"REQUEST.\n", cd->cdi.name);
+		err = -EIO;
 		if (SRpnt->sr_sense_buffer[12] == 0x20 &&
-		    SRpnt->sr_sense_buffer[13] == 0x00) {
+		    SRpnt->sr_sense_buffer[13] == 0x00)
 			/* sense: Invalid command operation code */
 			err = -EDRIVE_CANT_DO_THIS;
-		} else {
-			err = -EINVAL;
-		}
 #ifdef DEBUG
 		print_command(cgc->cmd);
 		print_req_sense("sr", SRpnt);
...
fs/bio.c  (+28 -18)

@@ -122,6 +122,7 @@ inline void bio_init(struct bio *bio)
 	bio->bi_max_vecs = 0;
 	bio->bi_end_io = NULL;
 	atomic_set(&bio->bi_cnt, 1);
+	bio->bi_private = NULL;
 }

 /**
...
@@ -354,7 +355,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	request_queue_t *q = bdev_get_queue(bdev);
 	int nr_pages;

-	nr_pages = q->max_sectors >> (PAGE_SHIFT - 9);
+	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (nr_pages > q->max_phys_segments)
 		nr_pages = q->max_phys_segments;
 	if (nr_pages > q->max_hw_segments)
...
@@ -385,13 +386,13 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	 * cloned bio must not modify vec list
 	 */
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
-		return 1;
+		return 0;

 	if (bio->bi_vcnt >= bio->bi_max_vecs)
-		return 1;
+		return 0;

 	if (((bio->bi_size + len) >> 9) > q->max_sectors)
-		return 1;
+		return 0;

 	/*
 	 * we might loose a segment or two here, but rather that than
...
@@ -404,7 +405,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	if (fail_segments) {
 		if (retried_segments)
-			return 1;
+			return 0;

 		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
 		retried_segments = 1;
...
@@ -425,18 +426,24 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	 * depending on offset), it can specify a merge_bvec_fn in the
 	 * queue to get further control
 	 */
-	if (q->merge_bvec_fn && q->merge_bvec_fn(q, bio, bvec)) {
-		bvec->bv_page = NULL;
-		bvec->bv_len = 0;
-		bvec->bv_offset = 0;
-		return 1;
+	if (q->merge_bvec_fn) {
+		/*
+		 * merge_bvec_fn() returns number of bytes it can accept
+		 * at this offset
+		 */
+		if (q->merge_bvec_fn(q, bio, bvec) < len) {
+			bvec->bv_page = NULL;
+			bvec->bv_len = 0;
+			bvec->bv_offset = 0;
+			return 0;
+		}
 	}

 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
 	bio->bi_hw_segments++;
 	bio->bi_size += len;
-	return 0;
+	return len;
 }

 /**
...
@@ -446,14 +453,15 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
  * @error:	error, if any
  *
  * Description:
- *   bio_endio() will end I/O @bytes_done number of bytes. This may be just
- *   a partial part of the bio, or it may be the whole bio. bio_endio() is
- *   the preferred way to end I/O on a bio, it takes care of decrementing
+ *   bio_endio() will end I/O on @bytes_done number of bytes. This may be
+ *   just a partial part of the bio, or it may be the whole bio. bio_endio()
+ *   is the preferred way to end I/O on a bio, it takes care of decrementing
 *   bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
 *   and one of the established -Exxxx (-EIO, for instance) error values in
- *   case something went wrong.
+ *   case something went wrong. Noone should call bi_end_io() directly on
+ *   a bio unless they own it and thus know that it has an end_io function.
 **/
-int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
 {
 	if (error)
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
...
@@ -465,7 +473,9 @@ int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
 	}

 	bio->bi_size -= bytes_done;
-	return bio->bi_end_io(bio, bytes_done, error);
+
+	if (bio->bi_end_io)
+		bio->bi_end_io(bio, bytes_done, error);
 }

 static void __init biovec_init_pools(void)
...
@@ -537,7 +547,7 @@ static int __init init_bio(void)
 	return 0;
 }

-module_init(init_bio);
+subsys_initcall(init_bio);

 EXPORT_SYMBOL(bio_alloc);
 EXPORT_SYMBOL(bio_put);
...
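bio_add_page() now mirrors that contract from the caller's side: it returns the number of bytes actually added (len on success) and 0 on failure, where it previously returned 0 on success and 1 on failure. The callers patched later in this merge (fs/direct-io.c, fs/mpage.c, fs/xfs/pagebuf/page_buf.c) all follow the same pattern, sketched here with illustrative submit-and-retry handling:

	/* new convention: compare bytes accepted against bytes requested */
	if (bio_add_page(bio, page, len, offset) < len) {
		/* bio full, or the merge hook refused: submit what we
		 * have and start a fresh bio for this page */
		submit_bio(rw, bio);
		bio = bio_alloc(GFP_NOIO, nr_vecs);
	}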
fs/direct-io.c  (+2 -2)

@@ -417,12 +417,12 @@ dio_bio_add_page(struct dio *dio, struct page *page,
 	/* Take a ref against the page each time it is placed into a BIO */
 	page_cache_get(page);
-	if (bio_add_page(dio->bio, page, bv_len, bv_offset)) {
+	if (bio_add_page(dio->bio, page, bv_len, bv_offset) < bv_len) {
 		dio_bio_submit(dio);
 		ret = dio_new_bio(dio, blkno);
 		if (ret == 0) {
 			ret = bio_add_page(dio->bio, page, bv_len, bv_offset);
-			BUG_ON(ret != 0);
+			BUG_ON(ret < bv_len);
 		} else {
 			/* The page didn't make it into a BIO */
 			page_cache_release(page);
...
fs/mpage.c  (+6 -2)

@@ -176,6 +176,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	unsigned first_hole = blocks_per_page;
 	struct block_device *bdev = NULL;
 	struct buffer_head bh;
+	int length;

 	if (page_has_buffers(page))
 		goto confused;
...
@@ -233,7 +234,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		goto confused;
 	}

-	if (bio_add_page(bio, page, first_hole << blkbits, 0)) {
+	length = first_hole << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
 		bio = mpage_bio_submit(READ, bio);
 		goto alloc_new;
 	}
...
@@ -334,6 +336,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 	int boundary = 0;
 	sector_t boundary_block = 0;
 	struct block_device *boundary_bdev = NULL;
+	int length;

 	if (page_has_buffers(page)) {
 		struct buffer_head *head = page_buffers(page);
...
@@ -467,7 +470,8 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 		try_to_free_buffers(page);
 	}

-	if (bio_add_page(bio, page, first_unmapped << blkbits, 0)) {
+	length = first_unmapped << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
 		bio = mpage_bio_submit(WRITE, bio);
 		goto alloc_new;
 	}
...
fs/pipe.c  (+2 -2)

@@ -109,7 +109,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible(PIPE_WAIT(*inode));
+			wake_up_interruptible_sync(PIPE_WAIT(*inode));
 			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
 		}
 		pipe_wait(inode);
...
@@ -117,7 +117,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
 	up(PIPE_SEM(*inode));
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible_sync(PIPE_WAIT(*inode));
+		wake_up_interruptible(PIPE_WAIT(*inode));
 		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
...
fs/xfs/pagebuf/page_buf.c  (+1 -1)

@@ -1448,7 +1448,7 @@ pagebuf_iorequest( /* start real I/O */
 		if (nbytes > size)
 			nbytes = size;

-		if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset))
+		if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset) < nbytes)
 			break;

 		offset = 0;
...
include/asm-i386/ide.h  (+1 -0)

@@ -70,6 +70,7 @@ static __inline__ void ide_init_default_hwifs(void)
 	int index;

 	for(index = 0; index < MAX_HWIFS; index++) {
+		memset(&hw, 0, sizeof hw);
 		ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
 		hw.irq = ide_default_irq(ide_default_io_base(index));
 		ide_register_hw(&hw, NULL);
...
include/linux/bio.h  (+2 -1)

@@ -101,6 +101,7 @@ struct bio {
 #define BIO_EOF		2	/* out-out-bounds error */
 #define BIO_SEG_VALID	3	/* nr_hw_seg valid */
 #define BIO_CLONED	4	/* doesn't own data */
+#define BIO_BOUNCED	5	/* bio is a bounce bio */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

 /*
...
@@ -201,7 +202,7 @@ struct bio {
 extern struct bio *bio_alloc(int, int);
 extern void bio_put(struct bio *);

-extern int bio_endio(struct bio *, unsigned int, int);
+extern void bio_endio(struct bio *, unsigned int, int);
 struct request_queue;
 extern inline int bio_phys_segments(struct request_queue *, struct bio *);
 extern inline int bio_hw_segments(struct request_queue *, struct bio *);
...
include/linux/blk.h  (+4 -17)

@@ -39,33 +39,20 @@ void initrd_init(void);
 */
 extern int end_that_request_first(struct request *, int, int);
+extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *);
 struct request *elv_next_request(request_queue_t *q);

 static inline void blkdev_dequeue_request(struct request *req)
 {
-	list_del(&req->queuelist);
+	BUG_ON(list_empty(&req->queuelist));
+
+	list_del_init(&req->queuelist);

 	if (req->q)
 		elv_remove_request(req->q, req);
 }

-#define _elv_add_request_core(q, rq, where, plug)			\
-	do {								\
-		if ((plug))						\
-			blk_plug_device((q));				\
-		(q)->elevator.elevator_add_req_fn((q), (rq), (where));	\
-	} while (0)
-
-#define _elv_add_request(q, rq, back, p) do {				      \
-	if ((back))							      \
-		_elv_add_request_core((q), (rq), (q)->queue_head.prev, (p)); \
-	else								      \
-		_elv_add_request_core((q), (rq), &(q)->queue_head, (p));     \
-} while (0)
-
-#define elv_add_request(q, rq, back) _elv_add_request((q), (rq), (back), 1)
-
 #if defined(MAJOR_NR) || defined(IDE_DRIVER)

 #if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)
 #if !defined(IDE_DRIVER)
...
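The BUG_ON() added to blkdev_dequeue_request() works because list_del_init(), now used throughout this merge in place of list_del(), leaves rq->queuelist re-initialized rather than poisoned, so emptiness is a checkable invariant afterwards. A two-line illustration:

	blkdev_dequeue_request(rq);		/* uses list_del_init() */
	BUG_ON(!list_empty(&rq->queuelist));	/* now guaranteed to hold */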
include/linux/blkdev.h  (+27 -1)

@@ -26,6 +26,8 @@ struct request {
 	struct list_head queuelist; /* looking for ->queue? you must _not_
 				     * access it directly, use
 				     * blkdev_dequeue_request! */
+	int ref_count;
+
 	void *elevator_private;

 	unsigned char cmd[16];
...
@@ -215,6 +217,7 @@ struct request_queue
 	unsigned int		max_segment_size;

 	unsigned long		seg_boundary_mask;
+	unsigned int		dma_alignment;

 	wait_queue_head_t	queue_wait;
...
@@ -254,6 +257,13 @@ struct request_queue
 */
 #define blk_queue_headactive(q, head_active)

+/*
+ * q->prep_rq_fn return values
+ */
+#define BLKPREP_OK		0	/* serve it */
+#define BLKPREP_KILL		1	/* fatal error, kill */
+#define BLKPREP_DEFER		2	/* leave on queue */
+
 extern unsigned long blk_max_low_pfn, blk_max_pfn;

 /*
...
@@ -268,7 +278,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

 extern int init_emergency_isa_pool(void);
-void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);

 #define rq_for_each_bio(bio, rq)	\
 	if ((rq->bio))			\
...
@@ -339,6 +349,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
...
@@ -385,6 +396,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }

+static inline int queue_dma_alignment(request_queue_t *q)
+{
+	int retval = 511;
+
+	if (q && q->dma_alignment)
+		retval = q->dma_alignment;
+
+	return retval;
+}
+
+static inline int bdev_dma_aligment(struct block_device *bdev)
+{
+	return queue_dma_alignment(bdev_get_queue(bdev));
+}
+
 #define blk_finished_io(nsects)	do { } while (0)
 #define blk_started_io(nsects)	do { } while (0)
...
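The BLKPREP_* values introduced above define the q->prep_rq_fn contract that elv_next_request() (drivers/block/elevator.c, earlier in this diff) now interprets: OK hands the request to the driver, DEFER leaves it queued for a later retry, KILL errors out the whole request and completes it. A minimal sketch of a prep function; controller_busy() is a hypothetical driver predicate:

	static int example_prep_rq(request_queue_t *q, struct request *rq)
	{
		if (controller_busy(q))
			return BLKPREP_DEFER;	/* stays on the queue */
		if (!(rq->flags & REQ_CMD))
			return BLKPREP_KILL;	/* request is terminated */
		return BLKPREP_OK;		/* dispatch to the driver */
	}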
include/linux/elevator.h  (+5 -2)

@@ -40,8 +40,8 @@ struct elevator_s
 /*
 * block elevator interface
 */
-extern void __elv_add_request(request_queue_t *, struct request *,
-			      struct list_head *);
+extern void elv_add_request(request_queue_t *, struct request *, int, int);
+extern void __elv_add_request(request_queue_t *, struct request *, int, int);
 extern int elv_merge(request_queue_t *, struct list_head **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
 			       struct request *);
...
@@ -50,6 +50,9 @@ extern void elv_remove_request(request_queue_t *, struct request *);
 extern int elv_queue_empty(request_queue_t *);
 extern inline struct list_head *elv_get_sort_head(request_queue_t *, struct request *);

+#define __elv_add_request_pos(q, rq, pos)			\
+	(q)->elevator.elevator_add_req_fn((q), (rq), (pos))
+
 /*
 * noop I/O scheduler. always merges, always inserts new request at tail
 */
...
mm/highmem.c  (+44 -35)

@@ -366,34 +366,13 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }

-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+			int bio_gfp, mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig), bio_gfp;
+	int i, rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, *from;
-	mempool_t *pool;
-	unsigned long pfn = q->bounce_pfn;
-	int gfp = q->bounce_gfp;
-
-	BUG_ON((*bio_orig)->bi_idx);
-
-	/*
-	 * for non-isa bounce case, just check if the bounce pfn is equal
-	 * to or bigger than the highest pfn in the system -- in that case,
-	 * don't waste time iterating over bio segments
-	 */
-	if (!(gfp & GFP_DMA)) {
-		if (pfn >= blk_max_pfn)
-			return;
-		bio_gfp = GFP_NOHIGHIO;
-		pool = page_pool;
-	} else {
-		BUG_ON(!isa_page_pool);
-		bio_gfp = GFP_NOIO;
-		pool = isa_page_pool;
-	}

 	bio_for_each_segment(from, *bio_orig, i) {
 		page = from->bv_page;
...
@@ -401,7 +380,7 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < pfn)
+		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < q->bounce_pfn)
 			continue;

 		/*
...
@@ -412,11 +391,11 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		to = bio->bi_io_vec + i;

-		to->bv_page = mempool_alloc(pool, gfp);
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 		to->bv_len = from->bv_len;
 		to->bv_offset = from->bv_offset;

-		if (rw & WRITE) {
+		if (rw == WRITE) {
 			char *vto, *vfrom;

 			vto = page_address(to->bv_page) + to->bv_offset;
...
@@ -437,15 +416,16 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	 * pages
 	 */
 	bio_for_each_segment(from, *bio_orig, i) {
-		to = &bio->bi_io_vec[i];
+		to = bio_iovec_idx(bio, i);
 		if (!to->bv_page) {
 			to->bv_page = from->bv_page;
 			to->bv_len = from->bv_len;
-			to->bv_offset = to->bv_offset;
+			to->bv_offset = from->bv_offset;
 		}
 	}

 	bio->bi_bdev = (*bio_orig)->bi_bdev;
+	bio->bi_flags |= (1 << BIO_BOUNCED);
 	bio->bi_sector = (*bio_orig)->bi_sector;
 	bio->bi_rw = (*bio_orig)->bi_rw;
...
@@ -454,14 +434,12 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	bio->bi_size = (*bio_orig)->bi_size;

 	if (pool == page_pool) {
-		if (rw & WRITE)
-			bio->bi_end_io = bounce_end_io_write;
+		bio->bi_end_io = bounce_end_io_write;
 		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read;
 	} else {
-		if (rw & WRITE)
-			bio->bi_end_io = bounce_end_io_write_isa;
+		bio->bi_end_io = bounce_end_io_write_isa;
 		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read_isa;
 	}
...
@@ -469,6 +447,37 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	*bio_orig = bio;
 }

+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+{
+	mempool_t *pool;
+	int bio_gfp;
+
+	BUG_ON((*bio_orig)->bi_idx);
+
+	/*
+	 * for non-isa bounce case, just check if the bounce pfn is equal
+	 * to or bigger than the highest pfn in the system -- in that case,
+	 * don't waste time iterating over bio segments
+	 */
+	if (!(q->bounce_gfp & GFP_DMA)) {
+		if (q->bounce_pfn >= blk_max_pfn)
+			return;
+		bio_gfp = GFP_NOHIGHIO;
+		pool = page_pool;
+	} else {
+		BUG_ON(!isa_page_pool);
+		bio_gfp = GFP_NOIO;
+		pool = isa_page_pool;
+	}
+
+	/*
+	 * slow path
+	 */
+	__blk_queue_bounce(q, bio_orig, bio_gfp, pool);
+}
+
 #if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_HIGHMEM)

 void check_highmem_ptes(void)
 {
...
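Splitting the bounce logic this way makes blk_queue_bounce() an inline fast-path test with __blk_queue_bounce() as the out-of-line slow path; for a non-ISA queue whose bounce_pfn already covers all of memory, the call reduces to roughly:

	/* inline fast path: nothing above bounce_pfn, nothing to do */
	if (!(q->bounce_gfp & GFP_DMA) && q->bounce_pfn >= blk_max_pfn)
		return;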