Commit 83a12fb7 authored by Sagi Grimberg, committed by Jens Axboe

nvme-pci: factor out cqe handling into a dedicated routine

Makes the code slightly more readable.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent eb281c82
@@ -741,6 +741,35 @@ static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
 	}
 }
 
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
+		struct nvme_completion *cqe)
+{
+	struct request *req;
+
+	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
+		dev_warn(nvmeq->dev->ctrl.device,
+			"invalid id %d completed on queue %d\n",
+			cqe->command_id, le16_to_cpu(cqe->sq_id));
+		return;
+	}
+
+	/*
+	 * AEN requests are special as they don't time out and can
+	 * survive any kind of queue freeze and often don't respond to
+	 * aborts.  We don't even bother to allocate a struct request
+	 * for them but rather special case them here.
+	 */
+	if (unlikely(nvmeq->qid == 0 &&
+			cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+		nvme_complete_async_event(&nvmeq->dev->ctrl,
+				cqe->status, &cqe->result);
+		return;
+	}
+
+	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+	nvme_end_request(req, cqe->status, cqe->result);
+}
+
 static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 {
 	u16 head, phase;
@@ -750,7 +779,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 
 	while (nvme_cqe_valid(nvmeq, head, phase)) {
 		struct nvme_completion cqe = nvmeq->cqes[head];
-		struct request *req;
 
 		if (++head == nvmeq->q_depth) {
 			head = 0;
@@ -760,28 +788,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		if (tag && *tag == cqe.command_id)
 			*tag = -1;
 
-		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
-			dev_warn(nvmeq->dev->ctrl.device,
-				"invalid id %d completed on queue %d\n",
-				cqe.command_id, le16_to_cpu(cqe.sq_id));
-			continue;
-		}
-
-		/*
-		 * AEN requests are special as they don't time out and can
-		 * survive any kind of queue freeze and often don't respond to
-		 * aborts.  We don't even bother to allocate a struct request
-		 * for them but rather special case them here.
-		 */
-		if (unlikely(nvmeq->qid == 0 &&
-				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-			nvme_complete_async_event(&nvmeq->dev->ctrl,
-					cqe.status, &cqe.result);
-			continue;
-		}
-
-		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-		nvme_end_request(req, cqe.status, cqe.result);
+		nvme_handle_cqe(nvmeq, &cqe);
 	}
 
 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
...
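
For context, here is how __nvme_process_cq() reads once the patch is applied. This is a sketch assembled only from the hunks above; lines the diff collapses (head/phase setup, the phase handling on wrap-around, and the doorbell update at the end) are left elided rather than reconstructed.

/*
 * Sketch, not part of the commit: the post-patch completion loop,
 * assembled from the hunks above.
 */
static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
{
	u16 head, phase;

	/* ... head/phase set up from the queue state (collapsed by the diff) ... */

	while (nvme_cqe_valid(nvmeq, head, phase)) {
		struct nvme_completion cqe = nvmeq->cqes[head];

		if (++head == nvmeq->q_depth) {
			head = 0;
			/* ... phase handling on wrap-around (collapsed by the diff) ... */
		}

		/* Polling callers pass a tag to watch for; clear it on a match. */
		if (tag && *tag == cqe.command_id)
			*tag = -1;

		/* All per-CQE classification now lives in nvme_handle_cqe(). */
		nvme_handle_cqe(nvmeq, &cqe);
	}

	/* ... compare against saved cq_head/cq_phase and update the doorbell
	 * only if something was consumed (collapsed by the diff) ... */
}

The loop itself is now only ring bookkeeping; invalid-id, AEN, and normal completions are classified in one place, which is the readability gain the commit message refers to.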