Commit f462fd36 authored by Pavel Begunkov, committed by Jens Axboe

io-wq: optimise out *next_work() double lock

When executing non-linked hashed work, io_worker_handle_work()
will lock-unlock wqe->lock to update hash, and then immediately
lock-unlock to get next work. Optimise this case and do
lock/unlock only once.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 58e39319
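
A minimal userspace sketch of the pattern being optimised may help before reading the diff (assumptions: the pthread mutex, 'queue', and the *_locked() helper names below are illustrative stand-ins, not io-wq internals). The slow variant drops the lock after clearing the hash bit and immediately re-takes it to fetch the next work item; the fast variant covers both steps with one critical section, which is the effect of the goto get_next added in the diff.

/*
 * Minimal userspace sketch of the pattern, NOT io-wq code: the pthread
 * mutex, 'queue', and the *_locked() helpers are illustrative stand-ins.
 */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct work { struct work *next; };
static struct work *queue;

/* Caller must hold 'lock' for both helpers. */
static void clear_hash_locked(void) { /* clear the hash bit, etc. */ }

static struct work *fetch_next_locked(void)
{
	struct work *w = queue;

	if (w)
		queue = w->next;
	return w;
}

/* Before the patch: two lock/unlock round trips per hashed item. */
static struct work *finish_hashed_slow(void)
{
	struct work *w;

	pthread_mutex_lock(&lock);
	clear_hash_locked();
	pthread_mutex_unlock(&lock);	/* lock dropped here... */

	pthread_mutex_lock(&lock);	/* ...and re-taken immediately */
	w = fetch_next_locked();
	pthread_mutex_unlock(&lock);
	return w;
}

/* After the patch: one critical section covers both the hash update
 * and the fetch of the next work item. */
static struct work *finish_hashed_fast(void)
{
	struct work *w;

	pthread_mutex_lock(&lock);
	clear_hash_locked();
	w = fetch_next_locked();
	pthread_mutex_unlock(&lock);
	return w;
}

int main(void)
{
	struct work w = { .next = NULL };

	queue = &w;
	finish_hashed_slow();
	queue = &w;
	finish_hashed_fast();
	return 0;
}

In the actual patch this is achieved not by restructuring into helpers but with a goto back to the fetch label while still holding wqe->lock, so the unlock only runs when there is dependent (linked) work to handle next.
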
@@ -474,11 +474,11 @@ static void io_worker_handle_work(struct io_worker *worker)
 {
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
+	unsigned hash = -1U;
 
 	do {
 		struct io_wq_work *work;
-		unsigned hash = -1U;
-
+get_next:
 		/*
 		 * If we got some work, mark us as busy. If we didn't, but
 		 * the list isn't empty, it means we stalled on hashed work.
@@ -524,9 +524,12 @@ static void io_worker_handle_work(struct io_worker *worker)
 				spin_lock_irq(&wqe->lock);
 				wqe->hash_map &= ~BIT_ULL(hash);
 				wqe->flags &= ~IO_WQE_FLAG_STALLED;
-				spin_unlock_irq(&wqe->lock);
 				/* dependent work is not hashed */
 				hash = -1U;
+				/* skip unnecessary unlock-lock wqe->lock */
+				if (!work)
+					goto get_next;
+				spin_unlock_irq(&wqe->lock);
 			}
 		}
 	} while (work);