Commit 72fbbc3d authored by Linus Torvalds

Merge tag 'for-5.19/block-exec-2022-06-02' of git://git.kernel.dk/linux-block

Pull block request execute cleanups from Jens Axboe:
 "This change was advertised in the initial core block pull request, but
  didn't actually make that branch as we deferred it to a post-merge
  pull request to avoid a bunch of cross branch issues.

  This series cleans up the block execute path quite nicely"

* tag 'for-5.19/block-exec-2022-06-02' of git://git.kernel.dk/linux-block:
  blk-mq: remove the done argument to blk_execute_rq_nowait
  blk-mq: avoid a mess of casts for blk_end_sync_rq
  blk-mq: remove __blk_execute_rq_nowait
parents 34845d92 e2e53086
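
The caller-visible contract after this series: blk_execute_rq_nowait() no longer takes a completion callback; callers assign rq->end_io (and, where needed, rq->end_io_data) on the request before submitting it. A minimal sketch of the old and new conventions, assuming a hypothetical driver (my_ctx, my_end_io, and my_submit are illustrative names, not part of this merge):

#include <linux/blk-mq.h>

/* Hypothetical driver context, for illustration only. */
struct my_ctx {
	blk_status_t status;
};

static void my_end_io(struct request *rq, blk_status_t err)
{
	struct my_ctx *ctx = rq->end_io_data;

	ctx->status = err;
	blk_mq_free_request(rq);
}

static void my_submit(struct request *rq, struct my_ctx *ctx)
{
	/* Old convention: blk_execute_rq_nowait(rq, true, my_end_io); */
	rq->end_io = my_end_io;		/* handler now lives on the request */
	rq->end_io_data = ctx;
	blk_execute_rq_nowait(rq, true);	/* at_head == true */
}

The diff below converts every in-tree caller to this convention.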
@@ -1152,24 +1152,6 @@ void blk_mq_start_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
-	struct completion *waiting = rq->end_io_data;
-
-	rq->end_io_data = (void *)(uintptr_t)error;
-
-	/*
-	 * complete last, if this is a stack request the process (and thus
-	 * the rq pointer) could be invalid right after this complete()
-	 */
-	complete(waiting);
-}
-
 /*
  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
  * queues. This is important for md arrays to benefit from merging
@@ -1204,33 +1186,10 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	plug->rq_count++;
 }
 
-static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *done, bool use_plug)
-{
-	WARN_ON(irqs_disabled());
-	WARN_ON(!blk_rq_is_passthrough(rq));
-
-	rq->end_io = done;
-
-	blk_account_io_start(rq);
-
-	if (use_plug && current->plug) {
-		blk_add_rq_to_plug(current->plug, rq);
-		return;
-	}
-
-	/*
-	 * don't check dying flag for MQ because the request won't
-	 * be reused after dying flag is set
-	 */
-	blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-
 /**
  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
  * @rq:	request to insert
  * @at_head:    insert request at head or tail of queue
- * @done:	I/O completion handler
  *
  * Description:
  *    Insert a fully prepared request at the back of the I/O scheduler queue
@@ -1239,13 +1198,32 @@ static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
  * Note:
  *    This function will invoke @done directly if the queue is dead.
  */
-void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
-	__blk_execute_rq_nowait(rq, at_head, done, true);
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
+
+	blk_account_io_start(rq);
+	if (current->plug)
+		blk_add_rq_to_plug(current->plug, rq);
+	else
+		blk_mq_sched_insert_request(rq, at_head, true, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
+struct blk_rq_wait {
+	struct completion done;
+	blk_status_t ret;
+};
+
+static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+{
+	struct blk_rq_wait *wait = rq->end_io_data;
+
+	wait->ret = ret;
+	complete(&wait->done);
+}
+
 static bool blk_rq_is_poll(struct request *rq)
 {
 	if (!rq->mq_hctx)
@@ -1277,30 +1255,37 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
-	unsigned long hang_check;
+	struct blk_rq_wait wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
+
+	WARN_ON(irqs_disabled());
+	WARN_ON(!blk_rq_is_passthrough(rq));
 
-	/*
-	 * iopoll requires request to be submitted to driver, so can't
-	 * use plug
-	 */
 	rq->end_io_data = &wait;
-	__blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq,
-			!blk_rq_is_poll(rq));
-
-	/* Prevent hang_check timer from firing at us during very long I/O */
-	hang_check = sysctl_hung_task_timeout_secs;
-
-	if (blk_rq_is_poll(rq))
-		blk_rq_poll_completion(rq, &wait);
-	else if (hang_check)
-		while (!wait_for_completion_io_timeout(&wait,
-				hang_check * (HZ/2)))
-			;
-	else
-		wait_for_completion_io(&wait);
+	rq->end_io = blk_end_sync_rq;
+
+	blk_account_io_start(rq);
+	blk_mq_sched_insert_request(rq, at_head, true, false);
+
+	if (blk_rq_is_poll(rq)) {
+		blk_rq_poll_completion(rq, &wait.done);
+	} else {
+		/*
+		 * Prevent hang_check timer from firing at us during very long
+		 * I/O
+		 */
+		unsigned long hang_check = sysctl_hung_task_timeout_secs;
+
+		if (hang_check)
+			while (!wait_for_completion_io_timeout(&wait.done,
+					hang_check * (HZ/2)))
+				;
+		else
+			wait_for_completion_io(&wait.done);
+	}
 
-	return (blk_status_t)(uintptr_t)rq->end_io_data;
+	return wait.ret;
 }
 EXPORT_SYMBOL(blk_execute_rq);
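
The hunk above also shows the payoff of the casts cleanup: the synchronous path now carries a typed status in struct blk_rq_wait next to an on-stack completion, instead of round-tripping it through rq->end_io_data as a uintptr_t. A hedged sketch of the same pattern in isolation (my_wait, my_done, and my_execute are illustrative names; the real synchronous path is blk_execute_rq() above, which additionally handles polled queues, the hang_check timeout, and plugging):

#include <linux/blk-mq.h>
#include <linux/completion.h>

/* Illustrative copy of the blk_rq_wait pattern. */
struct my_wait {
	struct completion done;
	blk_status_t ret;
};

static void my_done(struct request *rq, blk_status_t ret)
{
	struct my_wait *wait = rq->end_io_data;

	wait->ret = ret;	/* typed status, no uintptr_t casts */
	complete(&wait->done);	/* complete last: the waiter's stack frame
				 * (and thus *wait) may go away right after */
}

static blk_status_t my_execute(struct request *rq)
{
	struct my_wait wait = {
		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
	};

	rq->end_io_data = &wait;
	rq->end_io = my_done;
	/* Simplified: assumes no plug is held, so the request is
	 * inserted immediately rather than queued on current->plug. */
	blk_execute_rq_nowait(rq, false);
	wait_for_completion_io(&wait.done);
	return wait.ret;
}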
......
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
 	spin_unlock_irq(&host->lock);
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
 	crq->msg_bucket = (u32) rc;
 
 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
-	blk_execute_rq_nowait(rq, true, NULL);
+	blk_execute_rq_nowait(rq, true);
 
 	return 0;
 }
......
@@ -1206,9 +1206,10 @@ static void nvme_keep_alive_work(struct work_struct *work)
 
 	nvme_init_request(rq, &ctrl->ka_cmd);
 	rq->timeout = ctrl->kato * HZ;
+	rq->end_io = nvme_keep_alive_end_io;
 	rq->end_io_data = ctrl;
 	rq->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
+	blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
......
@@ -453,6 +453,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			blk_flags);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
 
 	/* to free bio on completion, as req->bio will be null at that time */
@@ -461,7 +462,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
 		pdu->meta_len = d.metadata_len;
 
-	blk_execute_rq_nowait(req, 0, nvme_uring_cmd_end_io);
+	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;
 }
......
@@ -1438,9 +1438,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	}
 
 	nvme_init_request(abort_req, &cmd);
+	abort_req->end_io = abort_endio;
 	abort_req->end_io_data = NULL;
 	abort_req->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(abort_req, false, abort_endio);
+	blk_execute_rq_nowait(abort_req, false);
 
 	/*
 	 * The aborted req will be completed on receiving the abort req.
@@ -2485,12 +2486,15 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 		return PTR_ERR(req);
 
 	nvme_init_request(req, &cmd);
+	if (opcode == nvme_admin_delete_cq)
+		req->end_io = nvme_del_cq_end;
+	else
+		req->end_io = nvme_del_queue_end;
 	req->end_io_data = nvmeq;
 
 	init_completion(&nvmeq->delete_done);
 	req->rq_flags |= RQF_QUIET;
-	blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
-			nvme_del_cq_end : nvme_del_queue_end);
+	blk_execute_rq_nowait(req, false);
 	return 0;
 }
......
@@ -285,8 +285,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		req->p.rq = rq;
 		queue_work(nvmet_wq, &req->p.work);
 	} else {
+		rq->end_io = nvmet_passthru_req_done;
 		rq->end_io_data = req;
-		blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
+		blk_execute_rq_nowait(rq, false);
 	}
 
 	if (ns)
......
@@ -2039,12 +2039,13 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
 	scmd->cmnd[5] = 0;
 	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+	scmd->allowed = 5;
 
 	req->rq_flags |= RQF_QUIET;
 	req->timeout = 10 * HZ;
-	scmd->allowed = 5;
+	req->end_io = eh_lock_door_done;
 
-	blk_execute_rq_nowait(req, true, eh_lock_door_done);
+	blk_execute_rq_nowait(req, true);
 }
 
 /**
......
@@ -831,7 +831,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 
 	srp->rq->timeout = timeout;
 	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
-	blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
+	srp->rq->end_io = sg_rq_end_io;
+	blk_execute_rq_nowait(srp->rq, at_head);
 	return 0;
 }
......
@@ -579,9 +579,10 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
 	req->timeout = timeout;
 	scmd->allowed = retries;
+	req->end_io = st_scsi_execute_end;
 	req->end_io_data = SRpnt;
-	blk_execute_rq_nowait(req, true, st_scsi_execute_end);
+	blk_execute_rq_nowait(req, true);
 	return 0;
 }
......
@@ -671,11 +671,12 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 	req->timeout = 0;
 	req->end_io_data = umap_req;
+	req->end_io = ufshpb_umap_req_compl_fn;
 
 	ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
 	scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.umap_req_cnt++;
 }
@@ -707,6 +708,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 	blk_rq_append_bio(req, map_req->bio);
 
 	req->end_io_data = map_req;
+	req->end_io = ufshpb_map_req_compl_fn;
 
 	if (unlikely(last))
 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
@@ -716,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
 		map_req->rb.srgn_idx, mem_size);
 	scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
 
-	blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
+	blk_execute_rq_nowait(req, true);
 
 	hpb->stats.map_req_cnt++;
 	return 0;
......
@@ -972,8 +972,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
 
 	cmd->priv = scmd->cmnd;
 
-	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
-			pscsi_req_done);
+	blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
 
 	return 0;
......
@@ -969,8 +969,7 @@ int blk_rq_unmap_user(struct bio *);
 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
 		unsigned int, gfp_t);
 int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct request *rq, bool at_head,
-		rq_end_io_fn *end_io);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
 
 struct req_iterator {
......