Commit 28d92ef1, authored Oct 27, 2002 by Linus Torvalds

Merge http://gkernel.bkbits.net/net-drivers-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux

parents ae43a826 8baa8006

Showing 29 changed files with 475 additions and 290 deletions (+475 -290)
 Documentation/BK-usage/00-INDEX              | +38   -0
 Documentation/BK-usage/bk-kernel-howto.txt   |  +1   -1
 drivers/block/deadline-iosched.c             |  +6   -0
 drivers/block/elevator.c                     | +52  -19
 drivers/block/ll_rw_blk.c                    | +173 -80
 drivers/block/umem.c                         |  +3   -8
 drivers/ide/ide-disk.c                       |  +0  -50
 drivers/ide/ide-floppy.c                     | +21   -0
 drivers/ide/ide.c                            | +31  -22
 drivers/ide/pci/hpt366.c                     |  +5   -5
 drivers/ide/pci/siimage.c                    |  +4   -4
 drivers/md/linear.c                          |  +8   -6
 drivers/md/raid0.c                           |  +2   -3
 drivers/net/tokenring/3c359.c                |  +1   -1
 drivers/net/tulip/xircom_cb.c                |  +2   -2
 drivers/net/wan/pc300_drv.c                  |  +2   -2
 drivers/scsi/scsi_lib.c                      |  +2   -2
 drivers/scsi/sr_ioctl.c                      |  +2   -4
 fs/bio.c                                     | +28  -18
 fs/direct-io.c                               |  +2   -2
 fs/mpage.c                                   |  +6   -2
 fs/pipe.c                                    |  +2   -2
 fs/xfs/pagebuf/page_buf.c                    |  +1   -1
 include/asm-i386/ide.h                       |  +1   -0
 include/linux/bio.h                          |  +2   -1
 include/linux/blk.h                          |  +4  -17
 include/linux/blkdev.h                       | +27   -1
 include/linux/elevator.h                     |  +5   -2
 mm/highmem.c                                 | +44  -35
Documentation/BK-usage/00-INDEX (new file, mode 100644)

bk-kernel-howto.txt: Description of kernel workflow under BitKeeper

bk-make-sum: Create summary of changesets in one repository and not
another, typically in preparation to be sent to an upstream maintainer.
Typical usage:
	cd my-updated-repo
	bk-make-sum ~/repo/original-repo
	mv /tmp/linus.txt ../original-repo.txt

bksend: Create readable text output containing summary of changes, GNU
patch of the changes, and BK metadata of changes (as needed for proper
importing into BitKeeper by an upstream maintainer). This output is
suitable for emailing BitKeeper changes. The recipient of this output
may pipe it directly to 'bk receive'.

bz64wrap: helper script. Uncompressed input is piped to this script,
which compresses its input, and then outputs the uu-/base64-encoded
version of the compressed input.

csets-to-patches: Produces a delta of two BK repositories, in the form
of individual files, each containing a single cset as a GNU patch.
Output is several files, each with the filename "/tmp/rev-$REV.patch".
Typical usage:
	cd my-updated-repo
	bk changes -L ~/repo/original-repo 2>&1 | \
		perl csets-to-patches

cset-to-linus: Produces a delta of two BK repositories, in the form of
changeset descriptions, with 'diffstat' output created for each
individual changeset.
Typical usage:
	cd my-updated-repo
	bk changes -L ~/repo/original-repo 2>&1 | \
		perl cset-to-linus > summary.txt

unbz64wrap: Reverse an encoded, compressed data stream created by
bz64wrap into an uncompressed, typically text/plain, output.
Documentation/BK-usage/bk-kernel-howto.txt

@@ -32,7 +32,7 @@ land at the right destination... but I'm getting ahead of myself.
 Let's start with this progression:
 Each BitKeeper source tree on disk is a repository unto itself.
-Each repository has a parent.
+Each repository has a parent (except the root/original, of course).
 Each repository contains a set of a changesets ("csets").
 Each cset is one or more changed files, bundled together.
drivers/block/deadline-iosched.c

@@ -118,6 +118,8 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
 	while ((entry = next) != hash_list) {
 		next = entry->next;
+		prefetch(next);
+
 		drq = list_entry_hash(entry);
 		BUG_ON(!drq->hash_valid_count);

@@ -191,6 +193,8 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
 	while ((entry = entry->prev) != sort_list) {
 		__rq = list_entry_rq(entry);
+		prefetch(entry->prev);
+
 		BUG_ON(__rq->flags & REQ_STARTED);

 		if (!(__rq->flags & REQ_CMD))

@@ -298,6 +302,8 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
 		struct list_head *nxt = rq->queuelist.next;
 		int this_rq_cost;
+		prefetch(nxt);
+
 		/*
 		 * take it off the sort and fifo list, move
 		 * to dispatch queue
drivers/block/elevator.c

@@ -272,13 +272,27 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 		e->elevator_merge_req_fn(q, rq, next);
 }

 /*
  * add_request and next_request are required to be supported, naturally
  */
-void __elv_add_request(request_queue_t *q, struct request *rq,
-		       struct list_head *insert_here)
+void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+		       int plug)
 {
-	q->elevator.elevator_add_req_fn(q, rq, insert_here);
+	struct list_head *insert = &q->queue_head;
+
+	if (at_end)
+		insert = insert->prev;
+	if (plug)
+		blk_plug_device(q);
+
+	q->elevator.elevator_add_req_fn(q, rq, insert);
+}
+
+void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+		     int plug)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, at_end, plug);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }

 static inline struct request *__elv_next_request(request_queue_t *q)

@@ -289,8 +303,14 @@ static inline struct request *__elv_next_request(request_queue_t *q)
 struct request *elv_next_request(request_queue_t *q)
 {
 	struct request *rq;
+	int ret;

 	while ((rq = __elv_next_request(q))) {
+		/*
+		 * just mark as started even if we don't start it, a request
+		 * that has been delayed should not be passed by new incoming
+		 * requests
+		 */
+		rq->flags |= REQ_STARTED;
+
 		if (&rq->queuelist == q->last_merge)

@@ -299,20 +319,22 @@ struct request *elv_next_request(request_queue_t *q)
 		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;

-		/*
-		 * all ok, break and return it
-		 */
-		if (!q->prep_rq_fn(q, rq))
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
 			break;
-
-		/*
-		 * prep said no-go, kill it
-		 */
-		blkdev_dequeue_request(rq);
-		if (end_that_request_first(rq, 0, rq->nr_sectors))
-			BUG();
-		end_that_request_last(rq);
+		} else if (ret == BLKPREP_DEFER) {
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			blkdev_dequeue_request(rq);
+			rq->flags |= REQ_QUIET;
+			while (end_that_request_first(rq, 0, rq->nr_sectors))
+				;
+			end_that_request_last(rq);
+		} else {
+			printk("%s: bad return=%d\n", __FUNCTION__, ret);
+			break;
+		}
 	}

 	return rq;

@@ -322,6 +344,16 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
 {
 	elevator_t *e = &q->elevator;

+	/*
+	 * the main clearing point for q->last_merge is on retrieval of
+	 * request by driver (it calls elv_next_request()), but it _can_
+	 * also happen here if a request is added to the queue but later
+	 * deleted without ever being given to driver (merged with another
+	 * request).
+	 */
+	if (&rq->queuelist == q->last_merge)
+		q->last_merge = NULL;
+
 	if (e->elevator_remove_req_fn)
 		e->elevator_remove_req_fn(q, rq);
 }

@@ -357,6 +389,7 @@ module_init(elevator_global_init);
 EXPORT_SYMBOL(elevator_noop);

+EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_next_request);
 EXPORT_SYMBOL(elv_remove_request);
drivers/block/ll_rw_blk.c

@@ -242,6 +242,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.state = 0;
 	blk_queue_max_sectors(q, MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
+	blk_queue_dma_alignment(q, 511);

 	/*
 	 * by default assume old behaviour and bounce for any highmem page

@@ -408,6 +409,21 @@ void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
 	q->seg_boundary_mask = mask;
 }

+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q:  the request queue for the device
+ * @dma_mask:  alignment mask
+ *
+ * description:
+ *    set required memory and length aligment for direct dma transactions.
+ *    this is used when buiding direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(request_queue_t *q, int mask)
+{
+	q->dma_alignment = mask;
+}
+
 void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
 {
 	spin_lock_init(lock);

@@ -549,7 +565,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 		return;
 	}

-	list_del(&rq->queuelist);
+	list_del_init(&rq->queuelist);
 	rq->flags &= ~REQ_QUEUED;
 	rq->tag = -1;

@@ -633,13 +649,13 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 		if (rq->tag == -1) {
 			printk("bad tag found on list\n");
-			list_del(&rq->queuelist);
+			list_del_init(&rq->queuelist);
 			rq->flags &= ~REQ_QUEUED;
 		} else
 			blk_queue_end_tag(q, rq);

 		rq->flags &= ~REQ_STARTED;
-		elv_add_request(q, rq, 0);
+		__elv_add_request(q, rq, 0, 0);
 	}
 }

@@ -655,14 +671,19 @@ static char *rq_flags[] = {
 	"REQ_PC",
 	"REQ_BLOCK_PC",
 	"REQ_SENSE",
 	"REQ_FAILED",
+	"REQ_QUIET",
 	"REQ_SPECIAL",
 	"REQ_DRIVE_CMD",
 	"REQ_DRIVE_TASK",
 	"REQ_DRIVE_TASKFILE",
 };

 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;

-	printk("%s: dev %02x:%02x: ", msg, major(rq->rq_dev), minor(rq->rq_dev));
+	printk("%s: dev %02x:%02x: flags = ", msg, major(rq->rq_dev),
+	       minor(rq->rq_dev));
 	bit = 0;
 	do {
 		if (rq->flags & (1 << bit))

@@ -670,10 +691,17 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 		bit++;
 	} while (bit < __REQ_NR_BITS);

-	printk("sector %llu, nr/cnr %lu/%u\n", (unsigned long long) rq->sector,
+	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long) rq->sector,
 						rq->nr_sectors,
 						rq->current_nr_sectors);
-	printk("bio %p, biotail %p\n", rq->bio, rq->biotail);
+	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio,
+	       rq->biotail, rq->buffer, rq->data, rq->data_len);
+
+	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+		printk("cdb: ");
+		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+			printk("%02x ", rq->cmd[bit]);
+		printk("\n");
+	}
 }

 void blk_recount_segments(request_queue_t *q, struct bio *bio)

@@ -1104,7 +1132,7 @@ static int __blk_cleanup_queue(struct request_list *list)
 	while (!list_empty(head)) {
 		rq = list_entry(head->next, struct request, queuelist);
-		list_del(&rq->queuelist);
+		list_del_init(&rq->queuelist);
 		kmem_cache_free(request_cachep, rq);
 		i++;
 	}

@@ -1264,13 +1292,20 @@ static struct request *get_request(request_queue_t *q, int rw)
 	if (!list_empty(&rl->free)) {
 		rq = blkdev_free_rq(&rl->free);
-		list_del(&rq->queuelist);
+		list_del_init(&rq->queuelist);
+		rq->ref_count = 1;
 		rl->count--;
 		if (rl->count < queue_congestion_on_threshold())
 			set_queue_congested(q, rw);
 		rq->flags = 0;
 		rq->rq_status = RQ_ACTIVE;
+		rq->errors = 0;
 		rq->special = NULL;
+		rq->buffer = NULL;
+		rq->data = NULL;
+		rq->sense = NULL;
 		rq->waiting = NULL;
+		rq->bio = rq->biotail = NULL;
 		rq->q = q;
 		rq->rl = rl;
 	}

@@ -1466,26 +1501,22 @@ static inline void add_request(request_queue_t * q, struct request * req,
 	 * elevator indicated where it wants this request to be
 	 * inserted at elevator_merge time
 	 */
-	__elv_add_request(q, req, insert_here);
+	__elv_add_request_pos(q, req, insert_here);
 }

 /*
  * Must be called with queue lock held and interrupts disabled
  */
-void blk_put_request(struct request *req)
+void __blk_put_request(request_queue_t *q, struct request *req)
 {
 	struct request_list *rl = req->rl;
-	request_queue_t *q = req->q;

-	if (unlikely(!q))
+	if (unlikely(--req->ref_count))
 		return;

 	req->rq_status = RQ_INACTIVE;
 	req->q = NULL;
 	req->rl = NULL;

-	if (q) {
-		if (q->last_merge == &req->queuelist)
-			q->last_merge = NULL;
-	}
-
 	/*
 	 * Request may not have originated from ll_rw_blk. if not,
 	 * it didn't come out of our reserved rq pools

@@ -1493,6 +1524,8 @@ void blk_put_request(struct request *req)
 	if (rl) {
 		int rw = 0;

+		BUG_ON(!list_empty(&req->queuelist));
+
 		list_add(&req->queuelist, &rl->free);

 		if (rl == &q->rq[WRITE])

@@ -1510,6 +1543,23 @@ void blk_put_request(struct request *req)
 	}
 }

+void blk_put_request(struct request *req)
+{
+	request_queue_t *q = req->q;
+
+	/*
+	 * if req->q isn't set, this request didnt originate from the
+	 * block layer, so it's safe to just disregard it
+	 */
+	if (q) {
+		unsigned long flags;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		__blk_put_request(q, req);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
 /**
  * blk_congestion_wait - wait for a queue to become uncongested
  * @rw: READ or WRITE

@@ -1568,7 +1618,7 @@ static void attempt_merge(request_queue_t *q, struct request *req,
 		elv_merge_requests(q, req, next);

 		blkdev_dequeue_request(next);
-		blk_put_request(next);
+		__blk_put_request(q, next);
 	}
 }

@@ -1761,7 +1811,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	add_request(q, req, insert_here);
 out:
 	if (freereq)
-		blk_put_request(freereq);
+		__blk_put_request(q, freereq);
 	spin_unlock_irq(q->queue_lock);
 	return 0;

@@ -1891,7 +1941,6 @@ int submit_bio(int rw, struct bio *bio)
 {
 	int count = bio_sectors(bio);

-	BUG_ON(!bio->bi_end_io);
 	BIO_BUG_ON(!bio->bi_size);
 	BIO_BUG_ON(!bio->bi_io_vec);
 	bio->bi_rw = rw;

@@ -1908,6 +1957,9 @@ inline void blk_recalc_rq_segments(struct request *rq)
 	struct bio *bio;
 	int nr_phys_segs, nr_hw_segs;

+	if (!rq->bio)
+		return;
+
 	rq->buffer = bio_data(rq->bio);

 	nr_phys_segs = nr_hw_segs = 0;

@@ -1925,7 +1977,7 @@ inline void blk_recalc_rq_segments(struct request *rq)
 inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
-	if (rq->bio) {
+	if (blk_fs_request(rq)) {
 		rq->hard_sector += nsect;
 		rq->nr_sectors = rq->hard_nr_sectors -= nsect;
 		rq->sector = rq->hard_sector;

@@ -1944,27 +1996,19 @@ inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	}
 }

-/**
- * end_that_request_first - end I/O on one buffer.
- * @req:      the request being processed
- * @uptodate: 0 for I/O error
- * @nr_sectors: number of sectors to end I/O on
- *
- * Description:
- *     Ends I/O on a number of sectors attached to @req, and sets it up
- *     for the next range of segments (if any) in the cluster.
- *
- * Return:
- *     0 - we are done with this request, call end_that_request_last()
- *     1 - still buffers pending for this request
- **/
-int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
+static int __end_that_request_first(struct request *req, int uptodate,
+				    int nr_bytes)
 {
-	int total_nsect = 0, error = 0;
+	int total_bytes, bio_nbytes, error = 0, next_idx = 0;
 	struct bio *bio;

-	req->errors = 0;
+	/*
+	 * for a REQ_BLOCK_PC request, we want to carry any eventual
+	 * sense key with us all the way through
+	 */
+	if (!blk_pc_request(req))
+		req->errors = 0;
+
 	if (!uptodate) {
 		error = -EIO;
 		if (!(req->flags & REQ_QUIET))

@@ -1973,56 +2017,56 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
 				(unsigned long long)req->sector);
 	}

+	total_bytes = bio_nbytes = 0;
 	while ((bio = req->bio)) {
-		int new_bio = 0, nsect;
+		int nbytes;

-		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
-			printk("%s: bio idx %d >= vcnt %d\n", __FUNCTION__,
-				bio->bi_idx, bio->bi_vcnt);
-			break;
-		}
+		if (nr_bytes >= bio->bi_size) {
+			req->bio = bio->bi_next;
+			nbytes = bio->bi_size;
+			bio_endio(bio, nbytes, error);
+			next_idx = 0;
+			bio_nbytes = 0;
+		} else {
+			int idx = bio->bi_idx + next_idx;

-		BIO_BUG_ON(bio_iovec(bio)->bv_len > bio->bi_size);
+			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+				blk_dump_rq_flags(req, "__end_that");
+				printk("%s: bio idx %d >= vcnt %d\n",
+						__FUNCTION__,
+						bio->bi_idx, bio->bi_vcnt);
+				break;
+			}

-		/*
-		 * not a complete bvec done
-		 */
-		nsect = bio_iovec(bio)->bv_len >> 9;
-		if (unlikely(nsect > nr_sectors)) {
-			int partial = nr_sectors << 9;
+			nbytes = bio_iovec_idx(bio, idx)->bv_len;
+			BIO_BUG_ON(nbytes > bio->bi_size);

-			bio_iovec(bio)->bv_offset += partial;
-			bio_iovec(bio)->bv_len -= partial;
-			bio_endio(bio, partial, error);
-			total_nsect += nr_sectors;
-			break;
-		}
+			/*
+			 * not a complete bvec done
+			 */
+			if (unlikely(nbytes > nr_bytes)) {
+				bio_iovec(bio)->bv_offset += nr_bytes;
+				bio_iovec(bio)->bv_len -= nr_bytes;
+				bio_nbytes += nr_bytes;
+				total_bytes += nr_bytes;
+				break;
+			}

-		/*
-		 * we are ending the last part of the bio, advance req pointer
-		 */
-		if ((nsect << 9) >= bio->bi_size) {
-			req->bio = bio->bi_next;
-			new_bio = 1;
+			/*
+			 * advance to the next vector
+			 */
+			next_idx++;
+			bio_nbytes += nbytes;
 		}

-		bio_endio(bio, nsect << 9, error);
-
-		total_nsect += nsect;
-		nr_sectors -= nsect;
-
-		/*
-		 * if we didn't advance the req->bio pointer, advance bi_idx
-		 * to indicate we are now on the next bio_vec
-		 */
-		if (!new_bio)
-			bio->bi_idx++;
+		total_bytes += nbytes;
+		nr_bytes -= nbytes;

 		if ((bio = req->bio)) {
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
-			if (unlikely(nr_sectors <= 0))
+			if (unlikely(nr_bytes <= 0))
 				break;
 		}
 	}

@@ -2036,17 +2080,64 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
 	/*
 	 * if the request wasn't completed, update state
 	 */
-	blk_recalc_rq_sectors(req, total_nsect);
+	if (bio_nbytes) {
+		bio_endio(bio, bio_nbytes, error);
+		req->bio->bi_idx += next_idx;
+	}
+
+	blk_recalc_rq_sectors(req, total_bytes >> 9);
 	blk_recalc_rq_segments(req);
 	return 1;
 }

+/**
+ * end_that_request_first - end I/O on a request
+ * @req:      the request being processed
+ * @uptodate: 0 for I/O error
+ * @nr_sectors: number of sectors to end I/O on
+ *
+ * Description:
+ *     Ends I/O on a number of sectors attached to @req, and sets it up
+ *     for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
+{
+	return __end_that_request_first(req, uptodate, nr_sectors << 9);
+}
+
+/**
+ * end_that_request_chunk - end I/O on a request
+ * @req:      the request being processed
+ * @uptodate: 0 for I/O error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, and sets it up
+ *     for the next range of segments (if any). Like end_that_request_first(),
+ *     but deals with bytes instead of sectors.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
+{
+	return __end_that_request_first(req, uptodate, nr_bytes);
+}
+
 /*
  * queue lock must be held
  */
 void end_that_request_last(struct request *req)
 {
 	if (req->waiting)
 		complete(req->waiting);

-	blk_put_request(req);
+	__blk_put_request(req->q, req);
 }

 int __init blk_dev_init(void)

@@ -2092,6 +2183,7 @@ int __init blk_dev_init(void)
 };

 EXPORT_SYMBOL(end_that_request_first);
+EXPORT_SYMBOL(end_that_request_chunk);
 EXPORT_SYMBOL(end_that_request_last);
 EXPORT_SYMBOL(blk_init_queue);
 EXPORT_SYMBOL(bdev_get_queue);

@@ -2112,6 +2204,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments);
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 EXPORT_SYMBOL(blk_queue_segment_boundary);
+EXPORT_SYMBOL(blk_queue_dma_alignment);
 EXPORT_SYMBOL(blk_rq_map_sg);
 EXPORT_SYMBOL(blk_nohighio);
 EXPORT_SYMBOL(blk_dump_rq_flags);
drivers/block/umem.c

@@ -548,12 +548,7 @@ static void process_page(unsigned long data)
 		return_bio = bio->bi_next;
 		bio->bi_next = NULL;

-		/* should use bio_endio(), however already cleared
-		 * BIO_UPTODATE. so set bio->bi_size = 0 manually to indicate
-		 * completely done
-		 */
-		bio->bi_size = 0;
-		bio->bi_end_io(bio, bytes, 0);
+		bio_endio(bio, bio->bi_size, 0);
 	}
 }

@@ -1041,7 +1036,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
 	spin_lock_init(&card->lock);

-	dev->driver_data = card;
+	pci_set_drvdata(dev, card);

 	if (pci_write_cmd != 0x0F) 	/* If not Memory Write & Invalidate */
 		pci_write_cmd = 0x07;	/* then Memory Write command */

@@ -1100,7 +1095,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
  */
 static void mm_pci_remove(struct pci_dev *dev)
 {
-	struct cardinfo *card = dev->driver_data;
+	struct cardinfo *card = pci_get_drvdata(dev);

 	tasklet_kill(&card->tasklet);
 	iounmap(card->csr_remap);
drivers/ide/ide-disk.c

@@ -1610,56 +1610,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
 #endif
 }

-static int idedisk_suspend(struct device *dev, u32 state, u32 level)
-{
-	ide_drive_t *drive = dev->driver_data;
-
-	printk("Suspending device %p\n", dev->driver_data);
-
-	/* I hope that every freeze operation from the upper levels have
-	 * already been done...
-	 */
-
-	if (level != SUSPEND_SAVE_STATE)
-		return 0;
-
-	BUG_ON(in_interrupt());
-
-	printk("Waiting for commands to finish\n");
-
-	/* wait until all commands are finished */
-	/* FIXME: waiting for spinlocks should be done instead. */
-	if (!(HWGROUP(drive)))
-		printk("No hwgroup?\n");
-	while (HWGROUP(drive)->handler)
-		yield();
-
-	/* set the drive to standby */
-	printk(KERN_INFO "suspending: %s ", drive->name);
-	if (drive->driver) {
-		if (drive->driver->standby)
-			drive->driver->standby(drive);
-	}
-	drive->blocked = 1;
-
-	while (HWGROUP(drive)->handler)
-		yield();
-
-	return 0;
-}
-
-static int idedisk_resume(struct device *dev, u32 level)
-{
-	ide_drive_t *drive = dev->driver_data;
-
-	if (level != RESUME_RESTORE_STATE)
-		return 0;
-	if (!drive->blocked)
-		panic("ide: Resume but not suspended?\n");
-
-	drive->blocked = 0;
-	return 0;
-}
-
 /* This is just a hook for the overall driver tree.
  */
drivers/ide/ide-floppy.c

@@ -1238,6 +1238,21 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
 	set_bit(PC_DMA_RECOMMENDED, &pc->flags);
 }

+static int
+idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy, idefloppy_pc_t *pc, struct request *rq)
+{
+	/*
+	 * just support eject for now, it would not be hard to make the
+	 * REQ_BLOCK_PC support fully-featured
+	 */
+	if (rq->cmd[0] != IDEFLOPPY_START_STOP_CMD)
+		return 1;
+
+	idefloppy_init_pc(pc);
+	memcpy(pc->c, rq->cmd, sizeof(pc->c));
+	return 0;
+}
+
 /*
  *	idefloppy_do_request is our request handling function.
  */

@@ -1280,6 +1295,12 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
 		idefloppy_create_rw_cmd(floppy, pc, rq, block);
 	} else if (rq->flags & REQ_SPECIAL) {
 		pc = (idefloppy_pc_t *) rq->buffer;
+	} else if (rq->flags & REQ_BLOCK_PC) {
+		pc = idefloppy_next_pc_storage(drive);
+		if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
+			idefloppy_do_end_request(drive, 0, 0);
+			return ide_stopped;
+		}
 	} else {
 		blk_dump_rq_flags(rq, "ide-floppy: unsupported command in queue");
drivers/ide/ide.c

@@ -878,13 +878,12 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 	ide_startstop_t startstop;
 	unsigned long block;
-	ide_hwif_t *hwif = HWIF(drive);
+
+	BUG_ON(!(rq->flags & REQ_STARTED));

 #ifdef DEBUG
 	printk("%s: start_request: current=0x%08lx\n",
-		hwif->name, (unsigned long) rq);
+		HWIF(drive)->name, (unsigned long) rq);
 #endif

 	/* bail early if we've exceeded max_failures */

@@ -910,7 +909,7 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		block = 1;  /* redirect MBR access to EZ-Drive partn table */

 #if (DISK_RECOVERY_TIME > 0)
-	while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
+	while ((read_timer() - HWIF(drive)->last_time) < DISK_RECOVERY_TIME);
 #endif

 	SELECT_DRIVE(drive);

@@ -1128,9 +1127,15 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 			break;
 		}

+		/*
+		 * we know that the queue isn't empty, but this can happen
+		 * if the q->prep_rq_fn() decides to kill a request
+		 */
 		rq = elv_next_request(&drive->queue);
-		if (!rq)
+		if (!rq) {
+			hwgroup->busy = !!ata_pending_commands(drive);
 			break;
+		}

 		if (!rq->bio && ata_pending_commands(drive))
 			break;

@@ -1515,10 +1520,8 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
-	unsigned int major = HWIF(drive)->major;
-	request_queue_t *q = &drive->queue;
-	struct list_head *queue_head = &q->queue_head;
 	DECLARE_COMPLETION(wait);
+	int insert_end = 1, err;

 #ifdef CONFIG_BLK_DEV_PDC4030
 	if (HWIF(drive)->chipset == ide_pdc4030 && rq->buffer != NULL)

@@ -1540,29 +1543,35 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 	}
 	rq->rq_disk = drive->disk;

-	if (action == ide_wait)
+	/*
+	 * we need to hold an extra reference to request for safe inspection
+	 * after completion
+	 */
+	if (action == ide_wait) {
+		rq->ref_count++;
 		rq->waiting = &wait;
+	}
+
 	spin_lock_irqsave(&ide_lock, flags);
-	if (blk_queue_empty(q) || action == ide_preempt) {
-		if (action == ide_preempt)
-			hwgroup->rq = NULL;
-	} else {
-		if (action == ide_wait || action == ide_end) {
-			queue_head = queue_head->prev;
-		} else
-			queue_head = queue_head->next;
+	if (action == ide_preempt) {
+		hwgroup->rq = NULL;
+		insert_end = 0;
 	}
-	q->elevator.elevator_add_req_fn(q, rq, queue_head);
+	__elv_add_request(&drive->queue, rq, insert_end, 0);
 	ide_do_request(hwgroup, 0);
 	spin_unlock_irqrestore(&ide_lock, flags);
+
+	err = 0;
 	if (action == ide_wait) {
 		/* wait for it to be serviced */
 		wait_for_completion(&wait);
 		/* return -EIO if errors */
-		return rq->errors ? -EIO : 0;
+		if (rq->errors)
+			err = -EIO;
+
+		blk_put_request(rq);
 	}
-	return 0;
+
+	return err;
 }

 EXPORT_SYMBOL(ide_do_drive_cmd);

@@ -3369,7 +3378,7 @@ int ide_register_driver(ide_driver_t *driver)
 		list_del_init(&drive->list);
 		ata_attach(drive);
 	}
-	driver->gen_driver.name = driver->name;
+	driver->gen_driver.name = (char *) driver->name;
 	driver->gen_driver.bus = &ide_bus_type;
 	driver->gen_driver.remove = ide_drive_remove;
 	return driver_register(&driver->gen_driver);
drivers/ide/pci/hpt366.c

@@ -316,7 +316,7 @@ static void hpt366_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 #endif
 	reg2 = pci_bus_clock_list(speed,
-		(struct chipset_bus_clock_list_entry *) dev->driver_data);
+		(struct chipset_bus_clock_list_entry *) pci_get_drvdata(dev));

 	/*
 	 * Disable on-chip PIO FIFO/buffer
	 *  (to avoid problems handling I/O errors later)

@@ -369,7 +369,7 @@ static void hpt370_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 	list_conf = pci_bus_clock_list(speed,
 				       (struct chipset_bus_clock_list_entry *)
-						dev->driver_data);
+						pci_get_drvdata(dev));
 	pci_read_config_dword(dev, drive_pci, &drive_conf);
 	list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);

@@ -401,7 +401,7 @@ static void hpt372_tune_chipset (ide_drive_t *drive, u8 xferspeed)
 	list_conf = pci_bus_clock_list(speed,
 				       (struct chipset_bus_clock_list_entry *)
-						dev->driver_data);
+						pci_get_drvdata(dev));
 	pci_read_config_dword(dev, drive_pci, &drive_conf);
 	list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);

 	if (speed < XFER_MW_DMA_0)

@@ -841,7 +841,7 @@ static int __init init_hpt37x(struct pci_dev *dev)
 	 * don't like to use the PLL because it will cause glitches
 	 * on PRST/SRST when the HPT state engine gets reset.
 	 */
-	if (dev->driver_data)
+	if (pci_get_drvdata(dev))
 		goto init_hpt37X_done;

@@ -923,7 +923,7 @@ static int __init init_hpt366 (struct pci_dev *dev)
 			break;
 	}

-	if (!dev->driver_data) {
+	if (!pci_get_drvdata(dev)) {
 		printk(KERN_ERR "hpt366: unknown bus timing.\n");
 		return -EOPNOTSUPP;
drivers/ide/pci/siimage.c

@@ -30,8 +30,8 @@ static int n_siimage_devs;
 static char *print_siimage_get_info (char *buf, struct pci_dev *dev, int index)
 {
 	char *p		= buf;
-	u8 mmio		= (dev->driver_data != NULL) ? 1 : 0;
-	u32 bmdma	= (mmio) ? ((u32) dev->driver_data) :
+	u8 mmio		= (pci_get_drvdata(dev) != NULL) ? 1 : 0;
+	u32 bmdma	= (mmio) ? ((u32) pci_get_drvdata(dev)) :
 				    (pci_resource_start(dev, 4));

 	p += sprintf(p, "\nController: %d\n", index);

@@ -769,14 +769,14 @@ static void __init init_iops_siimage (ide_hwif_t *hwif)
 	if ((dev->device == PCI_DEVICE_ID_SII_3112) && (!(class_rev)))
 		hwif->rqsize = 16;

-	if (dev->driver_data == NULL)
+	if (pci_get_drvdata(dev) == NULL)
 		return;

 	init_mmio_iops_siimage(hwif);
 }

 static unsigned int __init ata66_siimage (ide_hwif_t *hwif)
 {
-	if (hwif->pci_dev->driver_data == NULL) {
+	if (pci_get_drvdata(hwif->pci_dev) == NULL) {
 		u8 ata66 = 0;
 		pci_read_config_byte(hwif->pci_dev, SELREG(0), &ata66);
 		return (ata66 & 0x01) ? 1 : 0;
drivers/md/linear.c

@@ -52,19 +52,21 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
  *	@bio: the buffer head that's been built up so far
  *	@biovec: the request that could be merged to it.
  *
- *	Return 1 if the merge is not permitted (because the
- *	result would cross a device boundary), 0 otherwise.
+ *	Return amount of bytes we can take at this offset
  */
 static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	dev_info_t *dev0, *dev1;
+	dev_info_t *dev0;
+	int maxsectors, bio_sectors = (bio->bi_size + biovec->bv_len) >> 9;

 	dev0 = which_dev(mddev, bio->bi_sector);
-	dev1 = which_dev(mddev, bio->bi_sector +
-			 ((bio->bi_size + biovec->bv_len - 1) >> 9));
+	maxsectors = (dev0->size << 1) - (bio->bi_sector - (dev0->offset << 1));

-	return dev0 != dev1;
+	if (bio_sectors <= maxsectors)
+		return biovec->bv_len;
+
+	return (maxsectors << 9) - bio->bi_size;
 }

 static int linear_run (mddev_t *mddev)
drivers/md/raid0.c

@@ -168,8 +168,7 @@ static int create_strip_zones (mddev_t *mddev)
  *	@bio: the buffer head that's been built up so far
  *	@biovec: the request that could be merged to it.
  *
- *	Return 1 if the merge is not permitted (because the
- *	result would cross a chunk boundary), 0 otherwise.
+ *	Return amount of bytes we can accept at this offset
  */
 static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 {

@@ -182,7 +181,7 @@ static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
 	block = bio->bi_sector >> 1;
 	bio_sz = (bio->bi_size + biovec->bv_len) >> 10;

-	return chunk_size < ((block & (chunk_size - 1)) + bio_sz);
+	return (chunk_size - ((block & (chunk_size - 1)) + bio_sz)) << 10;
 }

 static int raid0_run (mddev_t *mddev)
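[Editor's note, not part of the commit: both MD personalities above switch their merge_bvec_fn from a yes/no answer to returning the number of bytes acceptable at the given offset, which is the new contract bio_add_page() checks (see fs/bio.c below). A simplified, hypothetical hook under an assumed fixed 64 KiB boundary; example_mergeable_bvec is not a real function.]

	/* hypothetical merge_bvec_fn: report how many of biovec->bv_len bytes
	 * still fit before an assumed 64 KiB boundary for this bio */
	static int example_mergeable_bvec(request_queue_t *q, struct bio *bio,
					  struct bio_vec *biovec)
	{
		unsigned int done = bio->bi_size;	/* bytes already in the bio */
		unsigned int limit = 64 * 1024;

		if (done + biovec->bv_len <= limit)
			return biovec->bv_len;		/* whole vec fits */

		return limit > done ? limit - done : 0;	/* partial or nothing */
	}

	/* registered once at queue setup time:
	 *	blk_queue_merge_bvec(q, example_mergeable_bvec);
	 */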
drivers/net/tokenring/3c359.c

@@ -1780,7 +1780,7 @@ static int xl_change_mtu(struct net_device *dev, int mtu)
 static void __devexit xl_remove_one (struct pci_dev *pdev)
 {
-	struct net_device *dev = pdev->driver_data;
+	struct net_device *dev = pci_get_drvdata(pdev);
 	struct xl_private *xl_priv = (struct xl_private *) dev->priv;

 	unregister_trdev(dev);
drivers/net/tulip/xircom_cb.c

@@ -299,7 +299,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
 	dev->get_stats = &xircom_get_stats;
 	dev->priv = private;
 	dev->do_ioctl = &private_ioctl;
-	pdev->driver_data = dev;
+	pci_set_drvdata(pdev, dev);

 	/* start the transmitter to get a heartbeat */

@@ -326,7 +326,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
  */
 static void __devexit xircom_remove(struct pci_dev *pdev)
 {
-	struct net_device *dev = pdev->driver_data;
+	struct net_device *dev = pci_get_drvdata(pdev);
 	struct xircom_private *card;
 	enter("xircom_remove");
 	if (dev != NULL) {
drivers/net/wan/pc300_drv.c

@@ -3556,7 +3556,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 	/* Set PCI drv pointer to the card structure */
-	pdev->driver_data = card;
+	pci_set_drvdata(pdev, card);

 	/* Set board type */
 	switch (device_id) {

@@ -3631,7 +3631,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 static void __devexit cpc_remove_one(struct pci_dev *pdev)
 {
-	pc300_t *card = (pc300_t *) pdev->driver_data;
+	pc300_t *card = pci_get_drvdata(pdev);

 	if (card->hw.rambase != 0) {
 		int i;
drivers/scsi/scsi_lib.c

@@ -240,7 +240,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
 		SCpnt->request->special = (void *) SCpnt;
 		if (blk_rq_tagged(SCpnt->request))
 			blk_queue_end_tag(q, SCpnt->request);
-		_elv_add_request(q, SCpnt->request, 0, 0);
+		__elv_add_request(q, SCpnt->request, 0, 0);
 	}

 /*

@@ -951,7 +951,7 @@ void scsi_request_fn(request_queue_t * q)
 			SCpnt->request->flags |= REQ_SPECIAL;
 			if (blk_rq_tagged(SCpnt->request))
 				blk_queue_end_tag(q, SCpnt->request);
-			_elv_add_request(q, SCpnt->request, 0, 0);
+			__elv_add_request(q, SCpnt->request, 0, 0);
 			break;
 		}
drivers/scsi/sr_ioctl.c

@@ -160,13 +160,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct cdrom_generic_command *cgc)
 		if (!cgc->quiet)
 			printk(KERN_ERR "%s: CDROM (ioctl) reports ILLEGAL "
 				"REQUEST.\n", cd->cdi.name);
+		err = -EIO;
 		if (SRpnt->sr_sense_buffer[12] == 0x20 &&
-		    SRpnt->sr_sense_buffer[13] == 0x00) {
+		    SRpnt->sr_sense_buffer[13] == 0x00)
 			/* sense: Invalid command operation code */
 			err = -EDRIVE_CANT_DO_THIS;
-		} else {
-			err = -EINVAL;
-		}
 #ifdef DEBUG
 		print_command(cgc->cmd);
 		print_req_sense("sr", SRpnt);
fs/bio.c

@@ -122,6 +122,7 @@ inline void bio_init(struct bio *bio)
 	bio->bi_max_vecs = 0;
 	bio->bi_end_io = NULL;
 	atomic_set(&bio->bi_cnt, 1);
+	bio->bi_private = NULL;
 }

 /**

@@ -354,7 +355,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	request_queue_t *q = bdev_get_queue(bdev);
 	int nr_pages;

-	nr_pages = q->max_sectors >> (PAGE_SHIFT - 9);
+	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (nr_pages > q->max_phys_segments)
 		nr_pages = q->max_phys_segments;
 	if (nr_pages > q->max_hw_segments)

@@ -385,13 +386,13 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	 * cloned bio must not modify vec list
 	 */
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
-		return 1;
+		return 0;

 	if (bio->bi_vcnt >= bio->bi_max_vecs)
-		return 1;
+		return 0;

 	if (((bio->bi_size + len) >> 9) > q->max_sectors)
-		return 1;
+		return 0;

 	/*
 	 * we might loose a segment or two here, but rather that than

@@ -404,7 +405,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	if (fail_segments) {
 		if (retried_segments)
-			return 1;
+			return 0;

 		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
 		retried_segments = 1;

@@ -425,18 +426,24 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	 * depending on offset), it can specify a merge_bvec_fn in the
 	 * queue to get further control
 	 */
-	if (q->merge_bvec_fn && q->merge_bvec_fn(q, bio, bvec)) {
-		bvec->bv_page = NULL;
-		bvec->bv_len = 0;
-		bvec->bv_offset = 0;
-		return 1;
+	if (q->merge_bvec_fn) {
+		/*
+		 * merge_bvec_fn() returns number of bytes it can accept
+		 * at this offset
+		 */
+		if (q->merge_bvec_fn(q, bio, bvec) < len) {
+			bvec->bv_page = NULL;
+			bvec->bv_len = 0;
+			bvec->bv_offset = 0;
+			return 0;
+		}
 	}

 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
 	bio->bi_hw_segments++;
 	bio->bi_size += len;
-	return 0;
+	return len;
 }

 /**

@@ -446,14 +453,15 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
  * @error:	error, if any
  *
  * Description:
- *   bio_endio() will end I/O @bytes_done number of bytes. This may be just
- *   a partial part of the bio, or it may be the whole bio. bio_endio() is
- *   the preferred way to end I/O on a bio, it takes care of decrementing
+ *   bio_endio() will end I/O on @bytes_done number of bytes. This may be
+ *   just a partial part of the bio, or it may be the whole bio. bio_endio()
+ *   is the preferred way to end I/O on a bio, it takes care of decrementing
 *   bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
 *   and one of the established -Exxxx (-EIO, for instance) error values in
- *   case something went wrong.
+ *   case something went wrong. Noone should call bi_end_io() directly on
+ *   a bio unless they own it and thus know that it has an end_io function.
 **/
-int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
 {
 	if (error)
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);

@@ -465,7 +473,9 @@ int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
 	}

 	bio->bi_size -= bytes_done;

-	return bio->bi_end_io(bio, bytes_done, error);
+	if (bio->bi_end_io)
+		bio->bi_end_io(bio, bytes_done, error);
 }

 static void __init biovec_init_pools(void)

@@ -537,7 +547,7 @@ static int __init init_bio(void)
 	return 0;
 }

-module_init(init_bio);
+subsys_initcall(init_bio);

 EXPORT_SYMBOL(bio_alloc);
 EXPORT_SYMBOL(bio_put);
fs/direct-io.c

@@ -417,12 +417,12 @@ dio_bio_add_page(struct dio *dio, struct page *page,
 	/* Take a ref against the page each time it is placed into a BIO */
 	page_cache_get(page);
-	if (bio_add_page(dio->bio, page, bv_len, bv_offset)) {
+	if (bio_add_page(dio->bio, page, bv_len, bv_offset) < bv_len) {
 		dio_bio_submit(dio);
 		ret = dio_new_bio(dio, blkno);
 		if (ret == 0) {
 			ret = bio_add_page(dio->bio, page, bv_len, bv_offset);
-			BUG_ON(ret != 0);
+			BUG_ON(ret < bv_len);
 		} else {
 			/* The page didn't make it into a BIO */
 			page_cache_release(page);
fs/mpage.c

@@ -176,6 +176,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	unsigned first_hole = blocks_per_page;
 	struct block_device *bdev = NULL;
 	struct buffer_head bh;
+	int length;

 	if (page_has_buffers(page))
 		goto confused;

@@ -233,7 +234,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		goto confused;
 	}

-	if (bio_add_page(bio, page, first_hole << blkbits, 0)) {
+	length = first_hole << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
 		bio = mpage_bio_submit(READ, bio);
 		goto alloc_new;
 	}

@@ -334,6 +336,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 	int boundary = 0;
 	sector_t boundary_block = 0;
 	struct block_device *boundary_bdev = NULL;
+	int length;

 	if (page_has_buffers(page)) {
 		struct buffer_head *head = page_buffers(page);

@@ -467,7 +470,8 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 		try_to_free_buffers(page);
 	}

-	if (bio_add_page(bio, page, first_unmapped << blkbits, 0)) {
+	length = first_unmapped << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
 		bio = mpage_bio_submit(WRITE, bio);
 		goto alloc_new;
 	}
fs/pipe.c

@@ -109,7 +109,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible(PIPE_WAIT(*inode));
+			wake_up_interruptible_sync(PIPE_WAIT(*inode));
 			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
 		}
 		pipe_wait(inode);

@@ -117,7 +117,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
 	up(PIPE_SEM(*inode));
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible_sync(PIPE_WAIT(*inode));
+		wake_up_interruptible(PIPE_WAIT(*inode));
 		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
fs/xfs/pagebuf/page_buf.c

@@ -1448,7 +1448,7 @@ pagebuf_iorequest( /* start real I/O */
 		if (nbytes > size)
 			nbytes = size;

-		if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset))
+		if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset) < nbytes)
 			break;

 		offset = 0;
include/asm-i386/ide.h

@@ -70,6 +70,7 @@ static __inline__ void ide_init_default_hwifs(void)
 	int index;

 	for (index = 0; index < MAX_HWIFS; index++) {
+		memset(&hw, 0, sizeof hw);
 		ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
 		hw.irq = ide_default_irq(ide_default_io_base(index));
 		ide_register_hw(&hw, NULL);
include/linux/bio.h

@@ -101,6 +101,7 @@ struct bio {
 #define BIO_EOF		2	/* out-out-bounds error */
 #define BIO_SEG_VALID	3	/* nr_hw_seg valid */
 #define BIO_CLONED	4	/* doesn't own data */
+#define BIO_BOUNCED	5	/* bio is a bounce bio */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

 /*

@@ -201,7 +202,7 @@ struct bio {
 extern struct bio *bio_alloc(int, int);
 extern void bio_put(struct bio *);

-extern int bio_endio(struct bio *, unsigned int, int);
+extern void bio_endio(struct bio *, unsigned int, int);
 struct request_queue;
 extern inline int bio_phys_segments(struct request_queue *, struct bio *);
 extern inline int bio_hw_segments(struct request_queue *, struct bio *);
include/linux/blk.h

@@ -39,33 +39,20 @@ void initrd_init(void);
 */
 extern int end_that_request_first(struct request *, int, int);
+extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *);
 struct request *elv_next_request(request_queue_t *q);

 static inline void blkdev_dequeue_request(struct request *req)
 {
-	list_del(&req->queuelist);
+	BUG_ON(list_empty(&req->queuelist));
+
+	list_del_init(&req->queuelist);

 	if (req->q)
 		elv_remove_request(req->q, req);
 }

-#define _elv_add_request_core(q, rq, where, plug)			\
-	do {								\
-		if ((plug))						\
-			blk_plug_device((q));				\
-		(q)->elevator.elevator_add_req_fn((q), (rq), (where));	\
-	} while (0)
-
-#define _elv_add_request(q, rq, back, p) do {				      \
-	if ((back))							      \
-		_elv_add_request_core((q), (rq), (q)->queue_head.prev, (p)); \
-	else								      \
-		_elv_add_request_core((q), (rq), &(q)->queue_head, (p));     \
-} while (0)
-
-#define elv_add_request(q, rq, back) _elv_add_request((q), (rq), (back), 1)
-
 #if defined(MAJOR_NR) || defined(IDE_DRIVER)

 #if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)
 #if !defined(IDE_DRIVER)
include/linux/blkdev.h

@@ -26,6 +26,8 @@ struct request {
 	struct list_head queuelist; /* looking for ->queue? you must _not_
 				     * access it directly, use
 				     * blkdev_dequeue_request! */
+	int ref_count;
+
 	void *elevator_private;

 	unsigned char cmd[16];

@@ -215,6 +217,7 @@ struct request_queue
 	unsigned int		max_segment_size;

 	unsigned long		seg_boundary_mask;
+	unsigned int		dma_alignment;

 	wait_queue_head_t	queue_wait;

@@ -254,6 +257,13 @@ struct request_queue
 */
 #define blk_queue_headactive(q, head_active)

+/*
+ * q->prep_rq_fn return values
+ */
+#define BLKPREP_OK		0	/* serve it */
+#define BLKPREP_KILL		1	/* fatal error, kill */
+#define BLKPREP_DEFER		2	/* leave on queue */
+
 extern unsigned long blk_max_low_pfn, blk_max_pfn;

 /*

@@ -268,7 +278,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

 extern int init_emergency_isa_pool(void);
-void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);

 #define rq_for_each_bio(bio, rq)	\
 	if ((rq->bio))			\

@@ -339,6 +349,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);

@@ -385,6 +396,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }

+static inline int queue_dma_alignment(request_queue_t *q)
+{
+	int retval = 511;
+
+	if (q && q->dma_alignment)
+		retval = q->dma_alignment;
+
+	return retval;
+}
+
+static inline int bdev_dma_aligment(struct block_device *bdev)
+{
+	return queue_dma_alignment(bdev_get_queue(bdev));
+}
+
 #define blk_finished_io(nsects)	do { } while (0)
 #define blk_started_io(nsects)	do { } while (0)
include/linux/elevator.h

@@ -40,8 +40,8 @@ struct elevator_s
 /*
  * block elevator interface
  */
-extern void __elv_add_request(request_queue_t *, struct request *,
-			      struct list_head *);
+extern void elv_add_request(request_queue_t *, struct request *, int, int);
+extern void __elv_add_request(request_queue_t *, struct request *, int, int);
 extern int elv_merge(request_queue_t *, struct list_head **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *, struct request *);

@@ -50,6 +50,9 @@ extern void elv_remove_request(request_queue_t *, struct request *);
 extern int elv_queue_empty(request_queue_t *);
 extern inline struct list_head *elv_get_sort_head(request_queue_t *, struct request *);

+#define __elv_add_request_pos(q, rq, pos)		\
+	(q)->elevator.elevator_add_req_fn((q), (rq), (pos))
+
 /*
  * noop I/O scheduler. always merges, always inserts new request at tail
  */
mm/highmem.c

@@ -366,34 +366,13 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }

-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
+			mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig), bio_gfp;
+	int i, rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, *from;
-	mempool_t *pool;
-	unsigned long pfn = q->bounce_pfn;
-	int gfp = q->bounce_gfp;
-
-	BUG_ON((*bio_orig)->bi_idx);
-
-	/*
-	 * for non-isa bounce case, just check if the bounce pfn is equal
-	 * to or bigger than the highest pfn in the system -- in that case,
-	 * don't waste time iterating over bio segments
-	 */
-	if (!(gfp & GFP_DMA)) {
-		if (pfn >= blk_max_pfn)
-			return;
-
-		bio_gfp = GFP_NOHIGHIO;
-		pool = page_pool;
-	} else {
-		BUG_ON(!isa_page_pool);
-
-		bio_gfp = GFP_NOIO;
-		pool = isa_page_pool;
-	}

 	bio_for_each_segment(from, *bio_orig, i) {
 		page = from->bv_page;

@@ -401,7 +380,7 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < pfn)
+		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < q->bounce_pfn)
 			continue;

 		/*

@@ -412,11 +391,11 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		to = bio->bi_io_vec + i;

-		to->bv_page = mempool_alloc(pool, gfp);
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 		to->bv_len = from->bv_len;
 		to->bv_offset = from->bv_offset;

-		if (rw & WRITE) {
+		if (rw == WRITE) {
 			char *vto, *vfrom;

 			vto = page_address(to->bv_page) + to->bv_offset;

@@ -437,15 +416,16 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	 * pages
 	 */
 	bio_for_each_segment(from, *bio_orig, i) {
-		to = &bio->bi_io_vec[i];
+		to = bio_iovec_idx(bio, i);
 		if (!to->bv_page) {
 			to->bv_page = from->bv_page;
 			to->bv_len = from->bv_len;
-			to->bv_offset = to->bv_offset;
+			to->bv_offset = from->bv_offset;
 		}
 	}

 	bio->bi_bdev = (*bio_orig)->bi_bdev;
+	bio->bi_flags |= (1 << BIO_BOUNCED);
 	bio->bi_sector = (*bio_orig)->bi_sector;
 	bio->bi_rw = (*bio_orig)->bi_rw;

@@ -454,14 +434,12 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	bio->bi_size = (*bio_orig)->bi_size;

 	if (pool == page_pool) {
-		if (rw & WRITE)
-			bio->bi_end_io = bounce_end_io_write;
-		else
+		bio->bi_end_io = bounce_end_io_write;
+		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read;
 	} else {
-		if (rw & WRITE)
-			bio->bi_end_io = bounce_end_io_write_isa;
-		else
+		bio->bi_end_io = bounce_end_io_write_isa;
+		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read_isa;
 	}

@@ -469,6 +447,37 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	*bio_orig = bio;
 }

+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+{
+	mempool_t *pool;
+	int bio_gfp;
+
+	BUG_ON((*bio_orig)->bi_idx);
+
+	/*
+	 * for non-isa bounce case, just check if the bounce pfn is equal
+	 * to or bigger than the highest pfn in the system -- in that case,
+	 * don't waste time iterating over bio segments
+	 */
+	if (!(q->bounce_gfp & GFP_DMA)) {
+		if (q->bounce_pfn >= blk_max_pfn)
+			return;
+
+		bio_gfp = GFP_NOHIGHIO;
+		pool = page_pool;
+	} else {
+		BUG_ON(!isa_page_pool);
+
+		bio_gfp = GFP_NOIO;
+		pool = isa_page_pool;
+	}
+
+	/*
+	 * slow path
+	 */
+	__blk_queue_bounce(q, bio_orig, bio_gfp, pool);
+}
+
 #if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_HIGHMEM)
 void check_highmem_ptes(void)
 {