Commit 5ee0524b authored by Bart Van Assche, committed by Jens Axboe

block: Add 'lock' as third argument to blk_alloc_queue_node()

This patch does not change any functionality.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Philipp Reisner <philipp.reisner@linbit.com>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 392db380
...@@ -810,7 +810,7 @@ void blk_exit_rl(struct request_queue *q, struct request_list *rl) ...@@ -810,7 +810,7 @@ void blk_exit_rl(struct request_queue *q, struct request_list *rl)
struct request_queue *blk_alloc_queue(gfp_t gfp_mask) struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{ {
return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE); return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
} }
EXPORT_SYMBOL(blk_alloc_queue); EXPORT_SYMBOL(blk_alloc_queue);
...@@ -888,7 +888,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t) ...@@ -888,7 +888,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
kblockd_schedule_work(&q->timeout_work); kblockd_schedule_work(&q->timeout_work);
} }
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
spinlock_t *lock)
{ {
struct request_queue *q; struct request_queue *q;
...@@ -1030,7 +1031,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) ...@@ -1030,7 +1031,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{ {
struct request_queue *q; struct request_queue *q;
q = blk_alloc_queue_node(GFP_KERNEL, node_id); q = blk_alloc_queue_node(GFP_KERNEL, node_id, NULL);
if (!q) if (!q)
return NULL; return NULL;
......
...@@ -2556,7 +2556,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) ...@@ -2556,7 +2556,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{ {
struct request_queue *uninit_q, *q; struct request_queue *uninit_q, *q;
uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
if (!uninit_q) if (!uninit_q)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
......
...@@ -1760,7 +1760,8 @@ static int null_add_dev(struct nullb_device *dev) ...@@ -1760,7 +1760,8 @@ static int null_add_dev(struct nullb_device *dev)
} }
null_init_queues(nullb); null_init_queues(nullb);
} else if (dev->queue_mode == NULL_Q_BIO) { } else if (dev->queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
NULL);
if (!nullb->q) { if (!nullb->q) {
rv = -ENOMEM; rv = -ENOMEM;
goto out_cleanup_queues; goto out_cleanup_queues;
......
...@@ -766,7 +766,7 @@ static int ide_init_queue(ide_drive_t *drive) ...@@ -766,7 +766,7 @@ static int ide_init_queue(ide_drive_t *drive)
* limits and LBA48 we could raise it but as yet * limits and LBA48 we could raise it but as yet
* do not. * do not.
*/ */
q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif)); q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif), NULL);
if (!q) if (!q)
return 1; return 1;
......
...@@ -384,7 +384,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) ...@@ -384,7 +384,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_dev; goto err_dev;
} }
tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
if (!tqueue) { if (!tqueue) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_disk; goto err_disk;
......
...@@ -1841,7 +1841,7 @@ static struct mapped_device *alloc_dev(int minor) ...@@ -1841,7 +1841,7 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->table_devices); INIT_LIST_HEAD(&md->table_devices);
spin_lock_init(&md->uevent_lock); spin_lock_init(&md->uevent_lock);
md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
if (!md->queue) if (!md->queue)
goto bad; goto bad;
md->queue->queuedata = md; md->queue->queuedata = md;
......
...@@ -344,7 +344,7 @@ static int pmem_attach_disk(struct device *dev, ...@@ -344,7 +344,7 @@ static int pmem_attach_disk(struct device *dev,
return -EBUSY; return -EBUSY;
} }
q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev)); q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
if (!q) if (!q)
return -ENOMEM; return -ENOMEM;
......
...@@ -162,7 +162,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) ...@@ -162,7 +162,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath) if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
return 0; return 0;
q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
if (!q) if (!q)
goto out; goto out;
q->queuedata = head; q->queuedata = head;
......
...@@ -2223,7 +2223,7 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev) ...@@ -2223,7 +2223,7 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
struct Scsi_Host *shost = sdev->host; struct Scsi_Host *shost = sdev->host;
struct request_queue *q; struct request_queue *q;
q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
if (!q) if (!q)
return NULL; return NULL;
q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
......
...@@ -1321,7 +1321,8 @@ extern long nr_blockdev_pages(void); ...@@ -1321,7 +1321,8 @@ extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *); bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t); struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int); struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
spinlock_t *lock);
extern void blk_put_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment