Commit 5a0ac57c authored by Yu Kuai, committed by Jens Axboe

blk-ioc: protect ioc_destroy_icq() by 'queue_lock'

Currently, an icq is tracked by both the request_queue (through
icq->q_node) and the task (through icq->ioc_node), and ioc_clear_queue()
called from elevator exit is not safe because it can access the list
without protection:

ioc_clear_queue			ioc_release_fn
 lock queue_lock
 list_splice
 /* move queue list to a local list */
 unlock queue_lock
 /*
  * lock is released, the local list
  * can be accessed through task exit.
  */

				lock ioc->lock
				while (!hlist_empty)
				 icq = hlist_entry
				 lock queue_lock
				  ioc_destroy_icq
				   delete icq->ioc_node
 while (!list_empty)
  icq = list_entry()		   list_del icq->q_node
  /*
   * This is not protected by any lock,
   * list_entry concurrent with list_del
   * is not safe.
   */

				 unlock queue_lock
				unlock ioc->lock

Fix this problem by protecting the 'icq->q_node' list with queue_lock in
ioc_clear_queue().
Reported-and-tested-by: Pradeep Pragallapati <quic_pragalla@quicinc.com>
Link: https://lore.kernel.org/lkml/20230517084434.18932-1-quic_pragalla@quicinc.com/
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230531073435.2923422-1-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6c500000
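
To tie the diagram above to the code: the pre-patch ioc_clear_queue(), reassembled below from the lines this commit removes (the comments are annotations added for this discussion, not from the source), splices the queue's icq list onto a local list, drops queue_lock, and then walks the local list with nothing to serialize it against ioc_destroy_icq() deleting icq->q_node from ioc_release_fn():

/*
 * Pre-patch ioc_clear_queue(), reassembled from the removed lines in the
 * diff below; comments are annotations.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);	/* detach under queue_lock */
	spin_unlock_irq(&q->queue_lock);		/* ...then drop it */

	rcu_read_lock();
	while (!list_empty(&icq_list)) {
		struct io_cq *icq =
			list_entry(icq_list.next, struct io_cq, q_node);

		/*
		 * Neither queue_lock nor ioc->lock is held for the
		 * list_empty()/list_entry() above, so they can race with
		 * ioc_release_fn() -> ioc_destroy_icq() doing
		 * list_del(&icq->q_node) under queue_lock.
		 */
		spin_lock_irq(&icq->ioc->lock);
		if (!(icq->flags & ICQ_DESTROYED))
			ioc_destroy_icq(icq);
		spin_unlock_irq(&icq->ioc->lock);
	}
	rcu_read_unlock();
}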
@@ -77,6 +77,10 @@ static void ioc_destroy_icq(struct io_cq *icq)
 	struct elevator_type *et = q->elevator->type;
 
 	lockdep_assert_held(&ioc->lock);
+	lockdep_assert_held(&q->queue_lock);
+
+	if (icq->flags & ICQ_DESTROYED)
+		return;
 
 	radix_tree_delete(&ioc->icq_tree, icq->q->id);
 	hlist_del_init(&icq->ioc_node);
@@ -128,11 +132,6 @@ static void ioc_release_fn(struct work_struct *work)
 			spin_lock(&q->queue_lock);
 			spin_lock(&ioc->lock);
 
-			/*
-			 * The icq may have been destroyed when the ioc lock
-			 * was released.
-			 */
-			if (!(icq->flags & ICQ_DESTROYED))
-				ioc_destroy_icq(icq);
+			ioc_destroy_icq(icq);
 
 			spin_unlock(&q->queue_lock);
@@ -171,23 +170,20 @@ static bool ioc_delay_free(struct io_context *ioc)
  */
 void ioc_clear_queue(struct request_queue *q)
 {
-	LIST_HEAD(icq_list);
-
 	spin_lock_irq(&q->queue_lock);
-	list_splice_init(&q->icq_list, &icq_list);
-	spin_unlock_irq(&q->queue_lock);
-
-	rcu_read_lock();
-	while (!list_empty(&icq_list)) {
+	while (!list_empty(&q->icq_list)) {
 		struct io_cq *icq =
-			list_entry(icq_list.next, struct io_cq, q_node);
+			list_first_entry(&q->icq_list, struct io_cq, q_node);
 
+		/*
+		 * Other context won't hold ioc lock to wait for queue_lock, see
+		 * details in ioc_release_fn().
+		 */
 		spin_lock_irq(&icq->ioc->lock);
-		if (!(icq->flags & ICQ_DESTROYED))
-			ioc_destroy_icq(icq);
+		ioc_destroy_icq(icq);
 		spin_unlock_irq(&icq->ioc->lock);
 	}
-	rcu_read_unlock();
+	spin_unlock_irq(&q->queue_lock);
 }
 
 #else /* CONFIG_BLK_ICQ */
 static inline void ioc_exit_icqs(struct io_context *ioc)
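
Taken together, the hunks leave ioc_clear_queue() walking q->icq_list entirely under q->queue_lock, taking each icq->ioc->lock inside it, which is the same queue_lock-then-ioc->lock nesting that ioc_release_fn() uses, while the duplicated ICQ_DESTROYED checks move into ioc_destroy_icq() itself. A sketch of the resulting function, reassembled from the hunks above:

/* ioc_clear_queue() after this commit, reassembled from the hunks above. */
void ioc_clear_queue(struct request_queue *q)
{
	spin_lock_irq(&q->queue_lock);
	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq =
			list_first_entry(&q->icq_list, struct io_cq, q_node);

		/*
		 * Other context won't hold ioc lock to wait for queue_lock, see
		 * details in ioc_release_fn().
		 */
		spin_lock_irq(&icq->ioc->lock);
		ioc_destroy_icq(icq);	/* returns early if ICQ_DESTROYED is set */
		spin_unlock_irq(&icq->ioc->lock);
	}
	spin_unlock_irq(&q->queue_lock);
}

list_empty() and list_first_entry() on q->icq_list now always run under the same queue_lock that ioc_destroy_icq() asserts before list_del of icq->q_node, which closes the unlocked window shown in the commit message diagram.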