Commit b9bce6e5 authored by Linus Torvalds

Merge tag 'block-6.0-2022-08-19' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should go into this release:

   - Small series of patches for ublk (ZiyangZhang)

   - Remove dead function (Yu)

   - Fix for running a block queue in case of resource starvation
     (Yufen)"

* tag 'block-6.0-2022-08-19' of git://git.kernel.dk/linux-block:
  blk-mq: run queue no matter whether the request is the last request
  blk-mq: remove unused function blk_mq_queue_stopped()
  ublk_drv: do not add a re-issued request aborted previously to ioucmd's task_work
  ublk_drv: update comment for __ublk_fail_req()
  ublk_drv: check ubq_daemon_is_dying() in __ublk_rq_task_work()
  ublk_drv: update iod->addr for UBLK_IO_NEED_GET_DATA
Parents: beaf1397 d3b38596

block/blk-mq.c

@@ -2229,26 +2229,6 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
 
-/**
- * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
- * @q: request queue.
- *
- * The caller is responsible for serializing this function against
- * blk_mq_{start,stop}_hw_queue().
- */
-bool blk_mq_queue_stopped(struct request_queue *q)
-{
-        struct blk_mq_hw_ctx *hctx;
-        unsigned long i;
-
-        queue_for_each_hw_ctx(q, hctx, i)
-                if (blk_mq_hctx_stopped(hctx))
-                        return true;
-
-        return false;
-}
-EXPORT_SYMBOL(blk_mq_queue_stopped);
-
 /*
  * This function is often used for pausing .queue_rq() by driver when
  * there isn't enough resource or some conditions aren't satisfied, and
@@ -2570,7 +2550,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
                         break;
                 case BLK_STS_RESOURCE:
                 case BLK_STS_DEV_RESOURCE:
-                        blk_mq_request_bypass_insert(rq, false, last);
+                        blk_mq_request_bypass_insert(rq, false, true);
                         blk_mq_commit_rqs(hctx, &queued, from_schedule);
                         return;
                 default:
drivers/block/ublk_drv.c

@@ -555,7 +555,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
         return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
 }
 
-static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 {
         return ubq->ubq_daemon->flags & PF_EXITING;
 }
@@ -605,8 +605,9 @@ static void ublk_complete_rq(struct request *req)
 }
 
 /*
- * __ublk_fail_req() may be called from abort context or ->ubq_daemon
- * context during exiting, so lock is required.
+ * Since __ublk_rq_task_work always fails requests immediately during
+ * exiting, __ublk_fail_req() is only called from abort context during
+ * exiting. So lock is unnecessary.
  *
  * Also aborting may not be started yet, keep in mind that one failed
  * request may be issued by block layer again.
@@ -644,8 +645,7 @@ static inline void __ublk_rq_task_work(struct request *req)
         struct ublk_device *ub = ubq->dev;
         int tag = req->tag;
         struct ublk_io *io = &ubq->ios[tag];
-        bool task_exiting = current != ubq->ubq_daemon ||
-                (current->flags & PF_EXITING);
+        bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
         unsigned int mapped_bytes;
 
         pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
@@ -680,6 +680,11 @@ static inline void __ublk_rq_task_work(struct request *req)
                  * do the copy work.
                  */
                 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+                /* update iod->addr because ublksrv may have passed a new io buffer */
+                ublk_get_iod(ubq, req->tag)->addr = io->addr;
+                pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+                                __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+                                ublk_get_iod(ubq, req->tag)->addr);
         }
 
         mapped_bytes = ublk_map_io(ubq, req, io);
@@ -751,9 +756,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
                 if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
                         goto fail;
         } else {
-                struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+                struct ublk_io *io = &ubq->ios[rq->tag];
+                struct io_uring_cmd *cmd = io->cmd;
                 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
 
+                /*
+                 * If the check pass, we know that this is a re-issued request aborted
+                 * previously in monitor_work because the ubq_daemon(cmd's task) is
+                 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+                 * because this ioucmd's io_uring context may be freed now if no inflight
+                 * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
+                 *
+                 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
+                 * the tag). Then the request is re-started(allocating the tag) and we are here.
+                 * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+                 * guarantees that here is a re-issued request aborted previously.
+                 */
+                if ((io->flags & UBLK_IO_FLAG_ABORTED))
+                        goto fail;
+
                 pdu->req = rq;
                 io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
         }
include/linux/blk-mq.h

@@ -857,7 +857,6 @@ void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_complete_request(struct request *rq);
 bool blk_mq_complete_request_remote(struct request *rq);
-bool blk_mq_queue_stopped(struct request_queue *q);
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);