Commit 5ea708d1 authored by Christoph Hellwig, committed by Jens Axboe

block: simplify blk_init_allocated_queue

Return an errno value instead of the passed-in queue so that callers
don't have to keep track of two queues, and move the assignment of the
request_fn and queue lock to the caller, as passing them as arguments
doesn't simplify anything.  While we're at it, also remove two pointless
NULL assignments (of prep_rq_fn and unprep_rq_fn), given that the
request_queue structure is zeroed on allocation.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent e6f7f93d
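
To illustrate the new calling convention, here is a minimal sketch of a
hypothetical caller written against the post-patch API; it mirrors what
blk_init_queue_node() now does. The names my_driver_alloc_queue,
my_request_fn, and my_lock are illustrative only and are not part of
this commit:

/*
 * Minimal sketch of a hypothetical caller under the post-patch API;
 * not code from this patch. The caller allocates the queue, assigns
 * request_fn and the optional lock itself, and treats a negative
 * return from blk_init_allocated_queue() as failure.
 */
static struct request_queue *my_driver_alloc_queue(request_fn_proc *my_request_fn,
						   spinlock_t *my_lock)
{
	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return NULL;

	/* The caller now assigns request_fn and the optional lock itself. */
	q->request_fn = my_request_fn;
	if (my_lock)
		q->queue_lock = my_lock;

	/* blk_init_allocated_queue() now returns 0 or a negative errno. */
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return NULL;
	}

	return q;
}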
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -823,15 +823,19 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-	struct request_queue *uninit_q, *q;
+	struct request_queue *q;
 
-	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
-	if (!uninit_q)
+	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	if (!q)
 		return NULL;
 
-	q = blk_init_allocated_queue(uninit_q, rfn, lock);
-	if (!q)
-		blk_cleanup_queue(uninit_q);
+	q->request_fn = rfn;
+	if (lock)
+		q->queue_lock = lock;
+	if (blk_init_allocated_queue(q) < 0) {
+		blk_cleanup_queue(q);
+		return NULL;
+	}
 
 	return q;
 }
@@ -839,30 +843,19 @@ EXPORT_SYMBOL(blk_init_queue_node);
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
-struct request_queue *
-blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
-			 spinlock_t *lock)
+int blk_init_allocated_queue(struct request_queue *q)
 {
-	if (!q)
-		return NULL;
-
 	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
 	if (!q->fq)
-		return NULL;
+		return -ENOMEM;
 
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
 		goto fail;
 
 	INIT_WORK(&q->timeout_work, blk_timeout_work);
-	q->request_fn		= rfn;
-	q->prep_rq_fn		= NULL;
-	q->unprep_rq_fn		= NULL;
 	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
 
-	/* Override internal queue lock with supplied lock pointer */
-	if (lock)
-		q->queue_lock		= lock;
-
 	/*
 	 * This also sets hw/phys segments, boundary and size
 	 */
@@ -880,13 +873,12 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	}
 
 	mutex_unlock(&q->sysfs_lock);
-
-	return q;
+	return 0;
 
 fail:
 	blk_free_flush_queue(q->fq);
 	wbt_exit(q);
-	return NULL;
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -823,7 +823,8 @@ static void dm_old_request_fn(struct request_queue *q)
 int dm_old_init_request_queue(struct mapped_device *md)
 {
 	/* Fully initialize the queue */
-	if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+	md->queue->request_fn = dm_old_request_fn;
+	if (blk_init_allocated_queue(md->queue) < 0)
 		return -EINVAL;
 
 	/* disable dm_old_request_fn's merge heuristic by default */
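
Note that dm_old_init_request_queue() collapses whatever negative errno
blk_init_allocated_queue() returns (currently only -ENOMEM) into
-EINVAL, so its own callers see the same error code as before this
change.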
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1137,8 +1137,7 @@ extern void blk_unprep_request(struct request *);
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 						 spinlock_t *lock, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
-						      request_fn_proc *, spinlock_t *);
+extern int blk_init_allocated_queue(struct request_queue *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);