Kirill Smelkov / linux / Commits

Commit 851c9f38, authored Mar 31, 2015 by Mike Snitzer
Merge remote-tracking branch 'jens/for-4.1/core' into dm/for-next
Parents: e9637415, c76cbbcf

Showing 4 changed files with 52 additions and 25 deletions (+52, -25):
    block/blk-core.c        (+16, -3)
    block/blk-mq-sysfs.c    (+1,  -0)
    block/blk-mq.c          (+32, -22)
    include/linux/blk-mq.h  (+3,  -0)
block/blk-core.c

@@ -557,6 +557,18 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+/* Allocate memory local to the request queue */
+static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+{
+        int nid = (int)(long)data;
+        return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+}
+
+static void free_request_struct(void *element, void *unused)
+{
+        kmem_cache_free(request_cachep, element);
+}
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
                 gfp_t gfp_mask)
 {
@@ -569,9 +581,10 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
         init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
         init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
-        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
-                                          mempool_free_slab, request_cachep,
-                                          gfp_mask, q->node);
+        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
+                                          free_request_struct,
+                                          (void *)(long)q->node, gfp_mask,
+                                          q->node);
         if (!rl->rq_pool)
                 return -ENOMEM;
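The hunk above replaces the generic mempool_alloc_slab/mempool_free_slab helpers, which allocate from whichever node the refilling CPU happens to run on, with callbacks that carry q->node through the opaque pool_data argument, so request memory stays local to the queue's home NUMA node. A minimal sketch of the same pattern; my_cachep, my_node_alloc, my_node_free and my_create_node_pool are hypothetical names, not from the commit:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *my_cachep;    /* hypothetical cache, created elsewhere
                                           with kmem_cache_create() */

/* alloc callback: unpack the NUMA node id smuggled in via pool_data */
static void *my_node_alloc(gfp_t gfp_mask, void *data)
{
        int nid = (int)(long)data;

        return kmem_cache_alloc_node(my_cachep, gfp_mask, nid);
}

static void my_node_free(void *element, void *unused)
{
        kmem_cache_free(my_cachep, element);
}

/* build a pool whose elements are allocated on NUMA node `nid` */
static mempool_t *my_create_node_pool(int nid, gfp_t gfp_mask)
{
        return mempool_create_node(2, my_node_alloc, my_node_free,
                                   (void *)(long)nid, gfp_mask, nid);
}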
block/blk-mq-sysfs.c

@@ -436,6 +436,7 @@ int blk_mq_register_disk(struct gendisk *disk)
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
 void blk_mq_sysfs_unregister(struct request_queue *q)
 {
block/blk-mq.c

@@ -33,7 +33,6 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static void blk_mq_run_queues(struct request_queue *q);
 
 /*
  * Check if any of the ctx's have pending work in this hardware queue
@@ -78,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
         while (true) {
                 int ret;
@@ -86,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
                         return 0;
 
+                if (!(gfp & __GFP_WAIT))
+                        return -EBUSY;
+
                 ret = wait_event_interruptible(q->mq_freeze_wq,
                                 !q->mq_freeze_depth || blk_queue_dying(q));
                 if (blk_queue_dying(q))
@@ -118,7 +120,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
         if (freeze) {
                 percpu_ref_kill(&q->mq_usage_counter);
-                blk_mq_run_queues(q);
+                blk_mq_run_hw_queues(q, false);
         }
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -257,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
         struct blk_mq_alloc_data alloc_data;
         int ret;
 
-        ret = blk_mq_queue_enter(q);
+        ret = blk_mq_queue_enter(q, gfp);
         if (ret)
                 return ERR_PTR(ret);
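With gfp threaded through blk_mq_queue_enter(), an allocation that may not sleep now fails fast with -EBUSY while the queue is frozen instead of blocking in wait_event_interruptible(). A hedged caller-side sketch; try_get_request is a hypothetical helper, not part of the commit:

#include <linux/blk-mq.h>
#include <linux/err.h>

static struct request *try_get_request(struct request_queue *q, int rw)
{
        struct request *rq;

        /* GFP_ATOMIC lacks __GFP_WAIT, so a frozen queue now yields
         * -EBUSY here rather than putting the caller to sleep */
        rq = blk_mq_alloc_request(q, rw, GFP_ATOMIC, false);
        if (IS_ERR(rq) && PTR_ERR(rq) == -EBUSY)
                return NULL;    /* frozen: back off and retry later */

        return rq;
}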
@@ -904,7 +906,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                         &hctx->run_work, 0);
 }
 
-static void blk_mq_run_queues(struct request_queue *q)
+void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
         struct blk_mq_hw_ctx *hctx;
         int i;
@@ -915,9 +917,10 @@ static void blk_mq_run_queues(struct request_queue *q)
                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                         continue;
 
-                blk_mq_run_hw_queue(hctx, false);
+                blk_mq_run_hw_queue(hctx, async);
         }
 }
+EXPORT_SYMBOL(blk_mq_run_hw_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
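Besides gaining an async flag, blk_mq_run_queues() goes from file-local to an exported blk_mq_run_hw_queues(), so drivers can restart all hardware queues themselves. A hedged sketch of driver-side use; struct my_dev, MY_DEV_STALLED and my_dev_unstall are hypothetical:

/* after clearing an internal stall condition, kick every hardware
 * queue asynchronously via the newly exported helper */
static void my_dev_unstall(struct my_dev *dev)
{
        clear_bit(MY_DEV_STALLED, &dev->flags);
        blk_mq_run_hw_queues(dev->queue, true);        /* async = true */
}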
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
         int rw = bio_data_dir(bio);
         struct blk_mq_alloc_data alloc_data;
 
-        if (unlikely(blk_mq_queue_enter(q))) {
+        if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
                 bio_endio(bio, -EIO);
                 return NULL;
         }
@@ -1890,10 +1893,26 @@ void blk_mq_release(struct request_queue *q)
 }
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+        struct request_queue *uninit_q, *q;
+
+        uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+        if (!uninit_q)
+                return ERR_PTR(-ENOMEM);
+
+        q = blk_mq_init_allocated_queue(set, uninit_q);
+        if (IS_ERR(q))
+                blk_cleanup_queue(uninit_q);
+
+        return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+                                                  struct request_queue *q)
 {
         struct blk_mq_hw_ctx **hctxs;
         struct blk_mq_ctx __percpu *ctx;
-        struct request_queue *q;
         unsigned int *map;
         int i;
@@ -1928,20 +1947,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
                 hctxs[i]->queue_num = i;
         }
 
-        q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
-        if (!q)
-                goto err_hctxs;
-
         /*
          * Init percpu_ref in atomic mode so that it's faster to shutdown.
          * See blk_register_queue() for details.
          */
         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-                goto err_mq_usage;
+                goto err_hctxs;
 
         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-        blk_queue_rq_timeout(q, 30000);
+        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
 
         q->nr_queues = nr_cpu_ids;
         q->nr_hw_queues = set->nr_hw_queues;
@@ -1967,9 +1982,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         else
                 blk_queue_make_request(q, blk_sq_make_request);
 
-        if (set->timeout)
-                blk_queue_rq_timeout(q, set->timeout);
-
         /*
          * Do this after blk_queue_make_request() overrides it...
          */
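Note the timeout fold in the first hunk: the driver-supplied set->timeout and the 30000 default are now applied in one place during queue init, which is what lets the standalone `if (set->timeout)` block in the second hunk go away. A hedged sketch of a tag set that relies on this behaviour; my_set and my_mq_ops are hypothetical:

static struct blk_mq_tag_set my_set = {
        .ops            = &my_mq_ops,   /* hypothetical ops table */
        .nr_hw_queues   = 1,
        .queue_depth    = 64,
        .numa_node      = NUMA_NO_NODE,
        .timeout        = 30 * HZ,      /* applied during queue init;
                                           30000 is the fallback when 0 */
};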
@@ -1981,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
         if (blk_mq_init_hw_queues(q, set))
-                goto err_mq_usage;
+                goto err_hctxs;
 
         mutex_lock(&all_q_mutex);
         list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,8 +2005,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
         return q;
 
-err_mq_usage:
-        blk_cleanup_queue(q);
 err_hctxs:
         kfree(map);
         for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2009,7 +2019,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         free_percpu(ctx);
         return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
 void blk_mq_free_queue(struct request_queue *q)
 {
@@ -2161,7 +2171,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
                 return -EINVAL;
 
-        if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+        if (!set->ops->queue_rq || !set->ops->map_queue)
                 return -EINVAL;
 
         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
include/linux/blk-mq.h

@@ -164,6 +164,8 @@ enum {
                 << BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+                                                  struct request_queue *q);
 void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
@@ -218,6 +220,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
                 void *priv);
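Taken together with the blk-mq.c changes, the new blk_mq_init_allocated_queue() declaration lets a stacked driver (the dm/for-next target of this merge is the obvious consumer) allocate its request_queue first, do its own setup, and hand the queue to blk-mq afterwards. A minimal sketch that mirrors the new blk_mq_init_queue() body shown above; my_init_queue is a hypothetical wrapper:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct request_queue *my_init_queue(struct blk_mq_tag_set *set)
{
        struct request_queue *uninit_q, *q;

        uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
        if (!uninit_q)
                return ERR_PTR(-ENOMEM);

        /* driver-specific setup of uninit_q could happen here,
         * before blk-mq takes over */

        q = blk_mq_init_allocated_queue(set, uninit_q);
        if (IS_ERR(q))
                blk_cleanup_queue(uninit_q);    /* mirror blk_mq_init_queue() */

        return q;
}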