Commit e42b3867 authored by Sagi Grimberg, committed by Christoph Hellwig

blk-mq-rdma: pass in queue map to blk_mq_rdma_map_queues

Will be used by nvme-rdma for queue map separation support.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 23454d59
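
For context, here is a rough sketch of the kind of per-map caller this change enables once a driver keeps separate default and read queue maps. It is illustrative only: the foo_* names, the nr_default_queues/nr_read_queues fields and the ibdev pointer are assumptions made for the example, and the HCTX_TYPE_* map indices come from the later queue-map separation work, not from this commit.

/* Hypothetical caller: map two separate queue maps against disjoint
 * ranges of the RDMA device's completion vectors. */
static int foo_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_ctrl *ctrl = set->driver_data;

	/* default queues use vectors 0..nr_default_queues-1 */
	set->map[HCTX_TYPE_DEFAULT].nr_queues = ctrl->nr_default_queues;
	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
			ctrl->ibdev, 0);

	/* read queues follow, both in the tag set (queue_offset) and in
	 * the device's vector space (first_vec) */
	set->map[HCTX_TYPE_READ].nr_queues = ctrl->nr_read_queues;
	set->map[HCTX_TYPE_READ].queue_offset = ctrl->nr_default_queues;
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
			ctrl->ibdev, ctrl->nr_default_queues);

	return 0;
}
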
block/blk-mq-rdma.c
@@ -29,24 +29,24 @@
  * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
  * vector, we fallback to the naive mapping.
  */
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
 
-	for (queue = 0; queue < set->nr_hw_queues; queue++) {
+	for (queue = 0; queue < map->nr_queues; queue++) {
 		mask = ib_get_vector_affinity(dev, first_vec + queue);
 		if (!mask)
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->map[0].mq_map[cpu] = queue;
+			map->mq_map[cpu] = map->queue_offset + queue;
 	}
 
 	return 0;
 
 fallback:
-	return blk_mq_map_queues(&set->map[0]);
+	return blk_mq_map_queues(map);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
drivers/nvme/host/rdma.c
@@ -1751,7 +1751,7 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_rdma_ctrl *ctrl = set->driver_data;
 
-	return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0);
+	return blk_mq_rdma_map_queues(&set->map[0], ctrl->device->dev, 0);
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
include/linux/blk-mq-rdma.h
@@ -4,7 +4,7 @@
 struct blk_mq_tag_set;
 struct ib_device;
 
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_RDMA_H */