Commit 442e19b7 authored by Sagi Grimberg, committed by Jens Axboe

nvme-pci: open-code polling logic in nvme_poll

Given that the code is simple enough, open-coding it seems better
than passing a tag by reference for each call site, and it also lets
us get rid of __nvme_process_cq.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 920d13a8
@@ -785,7 +785,7 @@ static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
 	return false;
 }
 
-static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
+static void nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	struct nvme_completion cqe;
 	int consumed = 0;
@@ -793,11 +793,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
 	while (nvme_read_cqe(nvmeq, &cqe)) {
 		nvme_handle_cqe(nvmeq, &cqe);
 		consumed++;
-
-		if (tag && *tag == cqe.command_id) {
-			*tag = -1;
-			break;
-		}
 	}
 
 	if (consumed) {
@@ -806,11 +801,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
 	}
 }
 
-static void nvme_process_cq(struct nvme_queue *nvmeq)
-{
-	__nvme_process_cq(nvmeq, NULL);
-}
-
 static irqreturn_t nvme_irq(int irq, void *data)
 {
 	irqreturn_t result;
@@ -833,16 +823,28 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 
 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 {
-	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
-		spin_lock_irq(&nvmeq->q_lock);
-		__nvme_process_cq(nvmeq, &tag);
-		spin_unlock_irq(&nvmeq->q_lock);
-
-		if (tag == -1)
-			return 1;
-	}
-
-	return 0;
+	struct nvme_completion cqe;
+	int found = 0, consumed = 0;
+
+	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+		return 0;
+
+	spin_lock_irq(&nvmeq->q_lock);
+	while (nvme_read_cqe(nvmeq, &cqe)) {
+		nvme_handle_cqe(nvmeq, &cqe);
+		consumed++;
+
+		if (tag == cqe.command_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (consumed)
+		nvme_ring_cq_doorbell(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
+
+	return found;
 }
 
 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
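Pieced together from the added lines of the last hunk, the open-coded polling routine after this patch reads as below. This is only a sketch assembled from the diff, not the full file: nvme_cqe_valid, nvme_read_cqe, nvme_handle_cqe, nvme_ring_cq_doorbell and the q_lock spinlock come from the surrounding driver code and are assumed to behave as their names suggest.

static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
{
	struct nvme_completion cqe;
	int found = 0, consumed = 0;

	/* Cheap unlocked peek: bail out early if no completion is pending. */
	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
		return 0;

	spin_lock_irq(&nvmeq->q_lock);
	/* Reap completions until the queue is empty or the polled tag shows up. */
	while (nvme_read_cqe(nvmeq, &cqe)) {
		nvme_handle_cqe(nvmeq, &cqe);
		consumed++;

		if (tag == cqe.command_id) {
			found = 1;
			break;
		}
	}

	/* Only touch the doorbell if entries were actually consumed. */
	if (consumed)
		nvme_ring_cq_doorbell(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);

	return found;
}

The tag comparison that __nvme_process_cq() previously performed through an optional pointer is now inline in the polling path, which is what allows both the by-reference parameter and the nvme_process_cq() wrapper around it to go away.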