Commit 6d469642 authored by Christoph Hellwig, committed by Jens Axboe

block: remove the lock argument to blk_alloc_queue_node

With the legacy request path gone there is no real need to override the
queue_lock.
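
For callers the conversion is mechanical: every in-tree caller already passed
NULL, so the trailing argument is simply dropped. A minimal sketch of the
caller-side change for a hypothetical bio-based driver (error handling
abbreviated):

	struct request_queue *q;

	/* No spinlock parameter anymore; the queue now always uses its
	 * embedded __queue_lock (see the blk_alloc_queue_node hunk below).
	 */
	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
		return -ENOMEM;
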
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 310df020
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
-	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
+	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
@@ -473,17 +473,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
  * @node_id: NUMA node to allocate memory from
- * @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
- *	serialize calls to the legacy .request_fn() callback. Ignored for
- *	blk-mq request queues.
- *
- * Note: pass the queue lock as the third argument to this function instead of
- * setting the queue lock pointer explicitly to avoid triggering a sporadic
- * crash in the blkcg code. This function namely calls blkcg_init_queue() and
- * the queue lock pointer must be set before blkcg_init_queue() is called.
  */
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
-		spinlock_t *lock)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
 	int ret;
@@ -534,8 +525,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 #endif
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
-	q->queue_lock = lock ? : &q->__queue_lock;
+	q->queue_lock = &q->__queue_lock;
 	init_waitqueue_head(&q->mq_freeze_wq);
...
@@ -2548,7 +2548,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
 	struct request_queue *uninit_q, *q;
-	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
 	if (!uninit_q)
 		return ERR_PTR(-ENOMEM);
...
@@ -2792,7 +2792,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	drbd_init_set_defaults(device);
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!q)
 		goto out_no_q;
 	device->rq_queue = q;
...
@@ -1659,8 +1659,7 @@ static int null_add_dev(struct nullb_device *dev)
 		}
 		null_init_queues(nullb);
 	} else if (dev->queue_mode == NULL_Q_BIO) {
-		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
-						NULL);
+		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
 		if (!nullb->q) {
 			rv = -ENOMEM;
 			goto out_cleanup_queues;
...
@@ -888,7 +888,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	card->biotail = &card->bio;
 	spin_lock_init(&card->lock);
-	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!card->queue)
 		goto failed_alloc;
...
@@ -389,7 +389,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 		goto err_dev;
 	}
-	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
+	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
 	if (!tqueue) {
 		ret = -ENOMEM;
 		goto err_disk;
...
@@ -1896,7 +1896,7 @@ static struct mapped_device *alloc_dev(int minor)
 	INIT_LIST_HEAD(&md->table_devices);
 	spin_lock_init(&md->uevent_lock);
-	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
+	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
 	if (!md->queue)
 		goto bad;
 	md->queue->queuedata = md;
...
@@ -393,7 +393,7 @@ static int pmem_attach_disk(struct device *dev,
 		return -EBUSY;
 	}
-	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
 	if (!q)
 		return -ENOMEM;
...
@@ -276,7 +276,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
 		return 0;
-	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
 	if (!q)
 		goto out;
 	q->queuedata = head;
...
@@ -1122,8 +1122,7 @@ extern long nr_blockdev_pages(void);
 bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
-					   spinlock_t *lock);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);