Commit b55ce732 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: kill already cached timeout.seq_offset

req->timeout.count and req->io->timeout.seq_offset store the same value,
which is sqe->off. Kill the second one.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 22cad158
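
For context, a minimal sketch of the duplication being removed (simplified, illustrative code only, not the exact upstream fs/io_uring.c; the *_sketch function names are hypothetical): the timeout offset is read once from sqe->off at prep time into req->timeout.count, and the old code then cached that same value a second time in io_timeout_data.

/* Illustrative sketch only, not the upstream code. */
static int io_timeout_prep_sketch(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	/* sqe->off is stored once on the request itself ... */
	req->timeout.count = READ_ONCE(sqe->off);
	return 0;
}

static int io_timeout_sketch(struct io_kiocb *req)
{
	struct io_timeout_data *data = &req->io->timeout;
	u32 count = req->timeout.count;

	/*
	 * ... so this second copy, deleted by the patch, merely
	 * duplicated req->timeout.count.
	 */
	data->seq_offset = count;
	return 0;
}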
@@ -357,7 +357,6 @@ struct io_timeout_data {
 	struct hrtimer timer;
 	struct timespec64 ts;
 	enum hrtimer_mode mode;
-	u32 seq_offset;
 };
 
 struct io_accept {
@@ -385,7 +384,7 @@ struct io_timeout {
 	struct file *file;
 	u64 addr;
 	int flags;
-	unsigned count;
+	u32 count;
 };
 
 struct io_rw {
@@ -4709,11 +4708,11 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 static int io_timeout(struct io_kiocb *req)
 {
-	unsigned count;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_timeout_data *data;
 	struct list_head *entry;
 	unsigned span = 0;
+	u32 count = req->timeout.count;
 	u32 seq = req->sequence;
 
 	data = &req->io->timeout;
@@ -4723,7 +4722,6 @@ static int io_timeout(struct io_kiocb *req)
 	 * timeout event to be satisfied. If it isn't set, then this is
 	 * a pure timeout request, sequence isn't used.
	 */
-	count = req->timeout.count;
 	if (!count) {
 		req->flags |= REQ_F_TIMEOUT_NOSEQ;
 		spin_lock_irq(&ctx->completion_lock);
@@ -4732,7 +4730,6 @@ static int io_timeout(struct io_kiocb *req)
 	}
 
 	req->sequence = seq + count;
-	data->seq_offset = count;
 
 	/*
	 * Insertion sort, ensuring the first entry in the list is always
@@ -4743,7 +4740,7 @@ static int io_timeout(struct io_kiocb *req)
 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
 		unsigned nxt_seq;
 		long long tmp, tmp_nxt;
-		u32 nxt_offset = nxt->io->timeout.seq_offset;
+		u32 nxt_offset = nxt->timeout.count;
 
 		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
 			continue;