Commit 4ee69866 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three fixes, two regressions and one that poses a problem in blk-mq
  with the new nvmef code"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: skip unmapped queues in blk_mq_alloc_request_hctx
  nvme-rdma: only clear queue flags after successful connect
  blk-throttle: Extend slice if throttle group is not empty
parents b22734a5 c8712c6a
block/blk-mq.c
@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	if (ret)
 		return ERR_PTR(ret);
 
+	/*
+	 * Check if the hardware context is actually mapped to anything.
+	 * If not tell the caller that it should skip this queue.
+	 */
 	hctx = q->queue_hw_ctx[hctx_idx];
+	if (!blk_mq_hw_queue_mapped(hctx)) {
+		ret = -EXDEV;
+		goto out_queue_exit;
+	}
 	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 	if (!rq) {
-		blk_queue_exit(q);
-		return ERR_PTR(-EWOULDBLOCK);
+		ret = -EWOULDBLOCK;
+		goto out_queue_exit;
 	}
 
 	return rq;
+
+out_queue_exit:
+	blk_queue_exit(q);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
...
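
The new -EXDEV return is the interesting part for callers: allocating a
request against a specific hardware queue can now fail with "this queue
has no mapped CPU", which should be treated as "skip this queue" rather
than as a fatal error. A minimal caller sketch under that assumption
(the function name and command setup are illustrative, not part of this
commit):

#include <linux/blk-mq.h>
#include <linux/err.h>

static int issue_per_queue_cmd(struct request_queue *q, unsigned int hctx_idx)
{
	struct request *rq;

	/* BLK_MQ_REQ_NOWAIT is required here: blk_mq_alloc_request_hctx()
	 * rejects sleeping allocations tied to a specific hctx. */
	rq = blk_mq_alloc_request_hctx(q, WRITE, BLK_MQ_REQ_NOWAIT, hctx_idx);
	if (IS_ERR(rq)) {
		if (PTR_ERR(rq) == -EXDEV)
			return 0;	/* unmapped queue: skip, not fatal */
		return PTR_ERR(rq);
	}

	/* ... fill in and issue the command here ... */

	blk_mq_free_request(rq);
	return 0;
}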
block/blk-throttle.c
@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 	/*
 	 * If previous slice expired, start a new one otherwise renew/extend
 	 * existing slice to make sure it is at least throtl_slice interval
-	 * long since now.
+	 * long since now. New slice is started only for empty throttle group.
+	 * If there is queued bio, that means there should be an active
+	 * slice and it should be extended instead.
 	 */
-	if (throtl_slice_used(tg, rw))
+	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 		throtl_start_new_slice(tg, rw);
 	else {
 		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
...
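
The condition change is the whole fix: an expired slice is replaced
with a fresh one only when the group has nothing queued for that
direction. If a bio is queued, there should still be an active slice,
and the else branch extends it instead. A tiny userspace model of just
that decision (should_start_new_slice() is a stand-in for the combined
throtl_slice_used()/nr_queued check, not a kernel function):

#include <stdbool.h>
#include <stdio.h>

/* slice_expired models throtl_slice_used(tg, rw); nr_queued models
 * tg->service_queue.nr_queued[rw]. */
static bool should_start_new_slice(bool slice_expired, unsigned int nr_queued)
{
	/* Only an empty group gets a fresh slice; a group with queued
	 * bios extends the slice it is already being charged under. */
	return slice_expired && nr_queued == 0;
}

int main(void)
{
	printf("%d\n", should_start_new_slice(true, 0));   /* 1: new slice */
	printf("%d\n", should_start_new_slice(true, 2));   /* 0: extend it */
	printf("%d\n", should_start_new_slice(false, 0));  /* 0: renew/extend */
	return 0;
}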
...@@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, ...@@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
queue = &ctrl->queues[idx]; queue = &ctrl->queues[idx];
queue->ctrl = ctrl; queue->ctrl = ctrl;
queue->flags = 0;
init_completion(&queue->cm_done); init_completion(&queue->cm_done);
if (idx > 0) if (idx > 0)
...@@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, ...@@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
goto out_destroy_cm_id; goto out_destroy_cm_id;
} }
clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags); set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
return 0; return 0;
......
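
The rdma change pairs the two flag updates: instead of wiping
queue->flags wholesale at init time, the DELETING bit is now cleared
atomically, and only once the connect has succeeded, right before
CONNECTED is set. A before/after contrast follows; the race reasoning
in the comments is one reading of the diff, not text from the commit
message. queue->flags is touched elsewhere only with atomic bitops, so
a plain store is the odd one out:

	/* before: non-atomic wipe of every flag, done at init time,
	 * before the queue is known to be usable */
	queue->flags = 0;

	/* after: atomic read-modify-write of single bits, done only
	 * after a successful connect; a concurrent set_bit() on another
	 * flag cannot be lost */
	clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
	set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);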