Commit 6728cb0e, authored Jan 31, 2008 by Jens Axboe
block: make core bits checkpatch compliant
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

Parent: 22b13210
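
The hunks below repeat a handful of checkpatch.pl fixes: adding an explicit log level to printk(), splitting lines longer than 80 columns, dropping a redundant "= NULL" initialization of a global pointer, taking assignments out of if() conditions, and placing EXPORT_SYMBOL() immediately after the closing brace of the function it exports. As a minimal illustrative sketch (not part of the commit; the function name and message below are hypothetical), the printk and EXPORT_SYMBOL pattern looks like this:

/* Hypothetical example, for illustration only -- not code from this commit. */
#include <linux/kernel.h>
#include <linux/module.h>

void example_report(unsigned int want, unsigned int left)
{
        /* checkpatch-unfriendly: no log level, message printed at the default level */
        /* printk("%s: want %u bytes done, %u left\n", __FUNCTION__, want, left); */

        /* checkpatch-friendly: explicit KERN_ERR level, line kept under 80 columns */
        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
               __FUNCTION__, want, left);
}
/* EXPORT_SYMBOL() directly follows the function, with no blank line in between. */
EXPORT_SYMBOL(example_report);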
Showing 8 changed files, with 117 additions and 142 deletions.
block/blk-barrier.c    +2   -3
block/blk-core.c       +72  -81
block/blk-exec.c       +0   -1
block/blk-map.c        +4   -6
block/blk-merge.c      +6   -6
block/blk-settings.c   +27  -34
block/blk-sysfs.c      +3   -2
block/blk-tag.c        +3   -9
block/blk-barrier.c

@@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
         if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
             prepare_flush_fn == NULL) {
-                printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+                printk(KERN_ERR "%s: prepare_flush_fn required\n",
+                       __FUNCTION__);
                 return -EINVAL;
         }
@@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
         return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_ordered);
 
 /*
@@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
         bio_put(bio);
         return ret;
 }
-
 EXPORT_SYMBOL(blkdev_issue_flush);
block/blk-core.c

@@ -3,7 +3,8 @@
  * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *      - July2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
@@ -42,7 +43,7 @@ struct kmem_cache *request_cachep;
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -137,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
                         error = -EIO;
 
                 if (unlikely(nbytes > bio->bi_size)) {
-                        printk("%s: want %u bytes done, only %u left\n",
+                        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
                                __FUNCTION__, nbytes, bio->bi_size);
                         nbytes = bio->bi_size;
                 }
@@ -161,23 +162,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 {
         int bit;
 
-        printk("%s: dev %s: type=%x, flags=%x\n", msg,
+        printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
                 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                 rq->cmd_flags);
 
-        printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-                                                 rq->nr_sectors,
-                                                 rq->current_nr_sectors);
-        printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+        printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
+                                                (unsigned long long)rq->sector,
+                                                rq->nr_sectors,
+                                                rq->current_nr_sectors);
+        printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
+                                                rq->bio, rq->biotail,
+                                                rq->buffer, rq->data,
+                                                rq->data_len);
 
         if (blk_pc_request(rq)) {
-                printk("cdb: ");
+                printk(KERN_INFO "  cdb: ");
                 for (bit = 0; bit < sizeof(rq->cmd); bit++)
                         printk("%02x ", rq->cmd[bit]);
                 printk("\n");
         }
 }
-
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
@@ -204,7 +208,6 @@ void blk_plug_device(struct request_queue *q)
                 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
         }
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
 /*
@@ -221,7 +224,6 @@ int blk_remove_plug(struct request_queue *q)
         del_timer(&q->unplug_timer);
         return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -328,7 +330,6 @@ void blk_start_queue(struct request_queue *q)
                 kblockd_schedule_work(&q->unplug_work);
         }
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -408,7 +409,7 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
         mutex_lock(&q->sysfs_lock);
         set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -419,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q)
         blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -575,7 +575,6 @@ int blk_get_queue(struct request_queue *q)
 
         return 1;
 }
-
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -888,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
         elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -939,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
                 blk_start_queueing(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -947,7 +944,7 @@ EXPORT_SYMBOL(blk_insert_request);
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
         drive_stat_acct(req, 1);
 
@@ -987,7 +984,6 @@ void disk_round_stats(struct gendisk *disk)
         }
         disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
 /*
@@ -1017,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                 freed_request(q, rw, priv);
         }
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1035,7 +1030,6 @@ void blk_put_request(struct request *req)
                 spin_unlock_irqrestore(q->queue_lock, flags);
         }
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1350,7 +1344,7 @@ static inline void __generic_make_request(struct bio *bio)
         }
 
         if (unlikely(nr_sectors > q->max_hw_sectors)) {
-                printk("bio too big device %s (%u > %u)\n",
+                printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                         bdevname(bio->bi_bdev, b),
                         bio_sectors(bio),
                         q->max_hw_sectors);
@@ -1439,7 +1433,6 @@ void generic_make_request(struct bio *bio)
         } while (bio);
         current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1480,13 +1473,12 @@ void submit_bio(int rw, struct bio *bio)
                         current->comm, task_pid_nr(current),
                                 (rw & WRITE) ? "WRITE" : "READ",
                                 (unsigned long long)bio->bi_sector,
-                                bdevname(bio->bi_bdev,b));
+                                bdevname(bio->bi_bdev, b));
                 }
         }
 
         generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
 /**
@@ -1518,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error,
         if (!blk_pc_request(req))
                 req->errors = 0;
 
-        if (error) {
-                if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-                        printk("end_request: I/O error, dev %s, sector %llu\n",
+        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                 req->rq_disk ? req->rq_disk->disk_name : "?",
                                 (unsigned long long)req->sector);
         }
@@ -1554,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error,
 
                 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                         blk_dump_rq_flags(req, "__end_that");
-                        printk("%s: bio idx %d >= vcnt %d\n",
-                                __FUNCTION__,
-                                bio->bi_idx, bio->bi_vcnt);
+                        printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+                               __FUNCTION__, bio->bi_idx,
+                               bio->bi_vcnt);
                         break;
                 }
@@ -1582,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error,
                 total_bytes += nbytes;
                 nr_bytes -= nbytes;
 
-                if ((bio = req->bio)) {
+                bio = req->bio;
+                if (bio) {
                         /*
                          * end more in this run, or just return 'not-done'
                          */
@@ -1626,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h)
         local_irq_enable();
 
         while (!list_empty(&local_list)) {
-                struct request *rq = list_entry(local_list.next, struct request, donelist);
+                struct request *rq;
 
+                rq = list_entry(local_list.next, struct request, donelist);
                 list_del_init(&rq->donelist);
                 rq->q->softirq_done_fn(rq);
         }
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-                          void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+                                    unsigned long action, void *hcpu)
 {
         /*
          * If a CPU goes away, splice its entries to the current CPU
@@ -1685,7 +1678,6 @@ void blk_complete_request(struct request *req)
         local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
 
 /*
@@ -2002,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work)
 {
         return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)
block/blk-exec.c

@@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
         return err;
 }
-
 EXPORT_SYMBOL(blk_execute_rq);
block/blk-map.c

@@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
          * direct dma. else, set up kernel bounce buffers
          */
         uaddr = (unsigned long) ubuf;
-        if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+        if (!(uaddr & queue_dma_alignment(q)) &&
+            !(len & queue_dma_alignment(q)))
                 bio = bio_map_user(q, NULL, uaddr, len, reading);
         else
                 bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
         blk_rq_unmap_user(bio);
         return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
         /* we don't allow misaligned data like bio_map_user() does. If the
          * user is using sg, they're expected to know the alignment constraints
          * and respect them accordingly */
-        bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq) == READ);
+        bio = bio_map_user_iov(q, NULL, iov, iov_count,
+                                rq_data_dir(rq) == READ);
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
@@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
         rq->buffer = rq->data = NULL;
         return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio)
 
         return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
         rq->buffer = rq->data = NULL;
         return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);
block/blk-merge.c

@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
                  * size, something has gone terribly wrong
                  */
                 if (rq->nr_sectors < rq->current_nr_sectors) {
-                        printk("blk: request botched\n");
+                        printk(KERN_ERR "blk: request botched\n");
                         rq->nr_sectors = rq->current_nr_sectors;
                 }
         }
@@ -235,7 +235,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
         if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                 blk_recount_segments(q, bio);
         len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-        if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-            !BIOVEC_VIRT_OVERSIZE(len)) {
+        if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+            && !BIOVEC_VIRT_OVERSIZE(len)) {
                 int mergeable = ll_new_mergeable(q, req, bio);
 
                 if (mergeable) {
@@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
         total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
         if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-                int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+                int len = req->biotail->bi_hw_back_size +
+                                next->bio->bi_hw_front_size;
                 /*
                  * propagate the combined length to the end of the requests
                  */
block/blk-settings.c

@@ -10,8 +10,10 @@
 
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
         q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
         q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
         q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  * blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
         /*
          * set defaults
@@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
         blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
         blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
         q->make_request_fn = mfn;
-        q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+        q->backing_dev_info.ra_pages =
+                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
         q->backing_dev_info.state = 0;
         q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
         blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
          */
         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
  **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-        unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+        unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
         int dma = 0;
 
         q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
         /* Assume anything <= 4GB can be handled by IOMMU.
            Actually some IOMMUs can handle everything, but I don't
            know of a way to test this here. */
-        if (bounce_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+        if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                 dma = 1;
         q->bounce_pfn = max_low_pfn;
 #else
-        if (bounce_pfn < blk_max_low_pfn)
+        if (b_pfn < blk_max_low_pfn)
                 dma = 1;
-        q->bounce_pfn = bounce_pfn;
+        q->bounce_pfn = b_pfn;
 #endif
         if (dma) {
                 init_emergency_isa_pool();
                 q->bounce_gfp = GFP_NOIO | GFP_DMA;
-                q->bounce_pfn = bounce_pfn;
+                q->bounce_pfn = b_pfn;
         }
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
@@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
         if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-                printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+                printk(KERN_INFO "%s: set to minimum %d\n",
+                       __FUNCTION__, max_sectors);
         }
 
         if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
                 q->max_hw_sectors = max_sectors;
         }
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
         if (!max_segments) {
                 max_segments = 1;
-                printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+                printk(KERN_INFO "%s: set to minimum %d\n",
+                       __FUNCTION__, max_segments);
         }
 
         q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
         if (!max_segments) {
                 max_segments = 1;
-                printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+                printk(KERN_INFO "%s: set to minimum %d\n",
+                       __FUNCTION__, max_segments);
         }
 
         q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
         if (max_size < PAGE_CACHE_SIZE) {
                 max_size = PAGE_CACHE_SIZE;
-                printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+                printk(KERN_INFO "%s: set to minimum %d\n",
+                       __FUNCTION__, max_size);
         }
 
         q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
         q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
         /* zero is "infinity" */
-        t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-        t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 
-        t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-        t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-        t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-        t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+        t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+        t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+        t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+        t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
         if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
                 clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf,
 
         return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
         if (mask < PAGE_CACHE_SIZE - 1) {
                 mask = PAGE_CACHE_SIZE - 1;
-                printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+                printk(KERN_INFO "%s: set to minimum %lx\n",
+                       __FUNCTION__, mask);
         }
 
         q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
         q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
         if (mask > q->dma_alignment)
                 q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 int __init blk_settings_init(void)
block/blk-sysfs.c

@@ -207,12 +207,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
                     const char *page, size_t length)
 {
         struct queue_sysfs_entry *entry = to_queue(attr);
-        struct request_queue *q = container_of(kobj, struct request_queue, kobj);
+        struct request_queue *q;
         ssize_t res;
 
         if (!entry->store)
                 return -EIO;
+
+        q = container_of(kobj, struct request_queue, kobj);
         mutex_lock(&q->sysfs_lock);
         if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                 mutex_unlock(&q->sysfs_lock);
block/blk-tag.c

@@ -21,7 +21,6 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
         return blk_map_queue_find_tag(q->queue_tags, tag);
 }
-
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
@@ -99,7 +98,6 @@ void blk_queue_free_tags(struct request_queue *q)
 {
         clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
@@ -185,7 +183,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
                 if (!tags)
                         goto fail;
         } else if (q->queue_tags) {
-                if ((rc = blk_queue_resize_tags(q, depth)))
+                rc = blk_queue_resize_tags(q, depth);
+                if (rc)
                         return rc;
                 set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
                 return 0;
@@ -203,7 +202,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
         kfree(tags);
         return -ENOMEM;
 }
-
 EXPORT_SYMBOL(blk_queue_init_tags);
 
 /**
@@ -260,7 +258,6 @@ int blk_queue_resize_tags(struct request_queue *q, int new_depth)
         kfree(tag_map);
         return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_resize_tags);
 
 /**
@@ -313,7 +310,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
         clear_bit_unlock(tag, bqt->tag_map);
         bqt->busy--;
 }
-
 EXPORT_SYMBOL(blk_queue_end_tag);
 
 /**
@@ -370,7 +366,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
         bqt->busy++;
         return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_start_tag);
 
 /**
@@ -392,5 +387,4 @@ void blk_queue_invalidate_tags(struct request_queue *q)
         list_for_each_safe(tmp, n, &q->tag_busy_list)
                 blk_requeue_request(q, list_entry_rq(tmp));
 }
-
 EXPORT_SYMBOL(blk_queue_invalidate_tags);