Commit ad22c355 authored by Keith Busch, committed by Jens Axboe

nvme: remove handling of multiple AEN requests

The driver only tracks a single outstanding AEN request, so this patch
removes the handling for multiple ones.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 08e15075
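For readers skimming the diff, here is a condensed, paraphrased sketch of the core.c flow after this patch (not a verbatim excerpt; the event-dispatch switch and error details are elided). The work handler submits the one outstanding AEN with no index bookkeeping, and the completion handler re-arms it by queueing the work again after each successfully reported event:

/* Paraphrased post-patch flow; see the core.c hunks below for the real code. */
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	/* Only one AEN is ever outstanding, so no event_limit/aer_idx tracking. */
	ctrl->ops->submit_async_event(ctrl);
}

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);

	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
		return;

	/* ... dispatch on (result & 0xff07) as in the hunk below ... */

	/* Re-arm: resubmit the single AEN once the event has been consumed. */
	queue_work(nvme_wq, &ctrl->async_event_work);
}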
drivers/nvme/host/core.c

@@ -2670,15 +2670,7 @@ static void nvme_async_event_work(struct work_struct *work)
 	struct nvme_ctrl *ctrl =
 		container_of(work, struct nvme_ctrl, async_event_work);
 
-	spin_lock_irq(&ctrl->lock);
-	while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
-		int aer_idx = --ctrl->event_limit;
-
-		spin_unlock_irq(&ctrl->lock);
-		ctrl->ops->submit_async_event(ctrl, aer_idx);
-		spin_lock_irq(&ctrl->lock);
-	}
-	spin_unlock_irq(&ctrl->lock);
+	ctrl->ops->submit_async_event(ctrl);
 }
 
 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)

@@ -2745,22 +2737,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 		union nvme_result *res)
 {
 	u32 result = le32_to_cpu(res->u32);
-	bool done = true;
-
-	switch (le16_to_cpu(status) >> 1) {
-	case NVME_SC_SUCCESS:
-		done = false;
-		/*FALLTHRU*/
-	case NVME_SC_ABORT_REQ:
-		++ctrl->event_limit;
-		if (ctrl->state == NVME_CTRL_LIVE)
-			queue_work(nvme_wq, &ctrl->async_event_work);
-		break;
-	default:
-		break;
-	}
 
-	if (done)
+	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
 		return;
 
 	switch (result & 0xff07) {

@@ -2774,12 +2752,12 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 	default:
 		dev_warn(ctrl->device, "async event result %08x\n", result);
 	}
+	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
-	ctrl->event_limit = NVME_NR_AEN_COMMANDS;
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
drivers/nvme/host/fc.c

@@ -2382,7 +2382,7 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 }
 
 static void
-nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
 	struct nvme_fc_fcp_op *aen_op;

@@ -2390,9 +2390,6 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	bool terminating = false;
 	blk_status_t ret;
 
-	if (aer_idx > NVME_NR_AEN_COMMANDS)
-		return;
-
 	spin_lock_irqsave(&ctrl->lock, flags);
 	if (ctrl->flags & FCCTRL_TERMIO)
 		terminating = true;

@@ -2401,13 +2398,13 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	if (terminating)
 		return;
 
-	aen_op = &ctrl->aen_ops[aer_idx];
+	aen_op = &ctrl->aen_ops[0];
 
 	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
 					NVMEFC_FCP_NODATA);
 	if (ret)
 		dev_err(ctrl->ctrl.device,
-			"failed async event work [%d]\n", aer_idx);
+			"failed async event work\n");
 }
 
 static void
drivers/nvme/host/nvme.h

@@ -162,7 +162,6 @@ struct nvme_ctrl {
 	u16 nssa;
 	u16 nr_streams;
 	atomic_t abort_limit;
-	u8 event_limit;
 	u8 vwc;
 	u32 vs;
 	u32 sgls;

@@ -237,7 +236,7 @@ struct nvme_ctrl_ops {
 	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 	void (*free_ctrl)(struct nvme_ctrl *ctrl);
-	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
+	void (*submit_async_event)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 	int (*reinit_request)(void *data, struct request *rq);
drivers/nvme/host/pci.c

@@ -1043,7 +1043,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 	return __nvme_poll(nvmeq, tag);
 }
 
-static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
 	struct nvme_queue *nvmeq = dev->queues[0];

@@ -1051,7 +1051,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
-	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH + aer_idx;
+	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 
 	spin_lock_irq(&nvmeq->q_lock);
 	__nvme_submit_cmd(nvmeq, &c);
drivers/nvme/host/rdma.c

@@ -1293,7 +1293,7 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
 	return queue->ctrl->tag_set.tags[queue_idx - 1];
 }
 
-static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
 	struct nvme_rdma_queue *queue = &ctrl->queues[0];

@@ -1303,9 +1303,6 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 	struct ib_sge sge;
 	int ret;
 
-	if (WARN_ON_ONCE(aer_idx != 0))
-		return;
-
 	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
 
 	memset(cmd, 0, sizeof(*cmd));
drivers/nvme/target/loop.c

@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
 {
 	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
 	struct nvme_loop_queue *queue = &ctrl->queues[0];