Commit 442e19b7 authored by Sagi Grimberg, committed by Jens Axboe

nvme-pci: open-code polling logic in nvme_poll

Given that the code is simple enough, open-coding it seems better than
passing a tag by reference for each call site; it also lets us get rid
of __nvme_process_cq.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 920d13a8
@@ -785,7 +785,7 @@ static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
 	return false;
 }
 
-static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
+static void nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	struct nvme_completion cqe;
 	int consumed = 0;
@@ -793,11 +793,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
 	while (nvme_read_cqe(nvmeq, &cqe)) {
 		nvme_handle_cqe(nvmeq, &cqe);
 		consumed++;
-
-		if (tag && *tag == cqe.command_id) {
-			*tag = -1;
-			break;
-		}
 	}
 
 	if (consumed) {
@@ -806,11 +801,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
 	}
 }
 
-static void nvme_process_cq(struct nvme_queue *nvmeq)
-{
-	__nvme_process_cq(nvmeq, NULL);
-}
-
 static irqreturn_t nvme_irq(int irq, void *data)
 {
 	irqreturn_t result;
@@ -833,16 +823,28 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 
 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 {
-	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
-		spin_lock_irq(&nvmeq->q_lock);
-		__nvme_process_cq(nvmeq, &tag);
-		spin_unlock_irq(&nvmeq->q_lock);
-
-		if (tag == -1)
-			return 1;
-	}
-
-	return 0;
+	struct nvme_completion cqe;
+	int found = 0, consumed = 0;
+
+	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+		return 0;
+
+	spin_lock_irq(&nvmeq->q_lock);
+	while (nvme_read_cqe(nvmeq, &cqe)) {
+		nvme_handle_cqe(nvmeq, &cqe);
+		consumed++;
+
+		if (tag == cqe.command_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (consumed)
+		nvme_ring_cq_doorbell(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
+
+	return found;
 }
 
 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
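Note: the blk-mq poll entry point that calls __nvme_poll is not changed by this patch and its body is collapsed in the hunk above. As a minimal sketch of the caller, assuming hctx->driver_data holds the nvme_queue as it did in the driver at the time, the open-coded helper now reports completion through its return value rather than through a tag passed by reference:

	/*
	 * Sketch (not part of this diff): blk-mq polls for a specific tag,
	 * and __nvme_poll() returns 1 if that tag's completion was reaped,
	 * 0 otherwise.
	 */
	static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
	{
		struct nvme_queue *nvmeq = hctx->driver_data;

		return __nvme_poll(nvmeq, tag);
	}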