Commit 450b7879 authored by Christoph Hellwig, committed by Jens Axboe

block: move blk_account_io_{start,done} to blk-mq.c

These are only used for request based I/O, so move them where they are
used.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20211117061404.331732-9-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f2b8f3ce
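
For orientation, here is a small sketch (not part of this commit; the example_* caller names are hypothetical and simplified) of how the moved helpers pair up over a request's lifetime inside blk-mq: blk_account_io_start() is called when a request is set up from bios and charges the start of the I/O to rq->part, while blk_account_io_done() is called at completion time with the completion timestamp.

/*
 * Illustrative sketch only, not part of this commit.  The example_*
 * callers are hypothetical; they stand in for the blk-mq paths that
 * issue and complete a request.
 */
static void example_issue_rq(struct request *rq)
{
	/* picks rq->part and updates io_ticks (when I/O stats are enabled) */
	blk_account_io_start(rq);

	/* ... hand the request to the driver ... */
}

static void example_complete_rq(struct request *rq)
{
	u64 now = ktime_get_ns();

	/* adds to ios[] and nsecs[] for rq->part; flush sequence requests are skipped */
	blk_account_io_done(rq, now);
}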
block/blk-core.c
@@ -1064,8 +1064,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
 }
 EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
 
-static void update_io_ticks(struct block_device *part, unsigned long now,
-		bool end)
+void update_io_ticks(struct block_device *part, unsigned long now, bool end)
 {
 	unsigned long stamp;
 again:
@@ -1080,30 +1079,6 @@ static void update_io_ticks(struct block_device *part, unsigned long now,
 	}
 }
 
-void __blk_account_io_done(struct request *req, u64 now)
-{
-	const int sgrp = op_stat_group(req_op(req));
-
-	part_stat_lock();
-	update_io_ticks(req->part, jiffies, true);
-	part_stat_inc(req->part, ios[sgrp]);
-	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
-	part_stat_unlock();
-}
-
-void __blk_account_io_start(struct request *rq)
-{
-	/* passthrough requests can hold bios that do not have ->bi_bdev set */
-	if (rq->bio && rq->bio->bi_bdev)
-		rq->part = rq->bio->bi_bdev;
-	else
-		rq->part = rq->rq_disk->part0;
-
-	part_stat_lock();
-	update_io_ticks(rq->part, jiffies, false);
-	part_stat_unlock();
-}
-
 static unsigned long __part_start_io_acct(struct block_device *part,
 					  unsigned int sectors, unsigned int op)
 {
block/blk-mq.c
@@ -809,6 +809,48 @@ bool blk_update_request(struct request *req, blk_status_t error,
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
+static void __blk_account_io_done(struct request *req, u64 now)
+{
+	const int sgrp = op_stat_group(req_op(req));
+
+	part_stat_lock();
+	update_io_ticks(req->part, jiffies, true);
+	part_stat_inc(req->part, ios[sgrp]);
+	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+	part_stat_unlock();
+}
+
+static inline void blk_account_io_done(struct request *req, u64 now)
+{
+	/*
+	 * Account IO completion. flush_rq isn't accounted as a
+	 * normal IO on queueing nor completion. Accounting the
+	 * containing request is enough.
+	 */
+	if (blk_do_io_stat(req) && req->part &&
+	    !(req->rq_flags & RQF_FLUSH_SEQ))
+		__blk_account_io_done(req, now);
+}
+
+static void __blk_account_io_start(struct request *rq)
+{
+	/* passthrough requests can hold bios that do not have ->bi_bdev set */
+	if (rq->bio && rq->bio->bi_bdev)
+		rq->part = rq->bio->bi_bdev;
+	else
+		rq->part = rq->rq_disk->part0;
+
+	part_stat_lock();
+	update_io_ticks(rq->part, jiffies, false);
+	part_stat_unlock();
+}
+
+static inline void blk_account_io_start(struct request *req)
+{
+	if (blk_do_io_stat(req))
+		__blk_account_io_start(req);
+}
+
 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
 {
 	if (rq->rq_flags & RQF_STATS) {
block/blk.h
@@ -257,9 +257,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 			struct bio *bio, unsigned int nr_segs);
 
-void __blk_account_io_start(struct request *req);
-void __blk_account_io_done(struct request *req, u64 now);
-
 /*
  * Plug flush limits
  */
@@ -350,23 +347,7 @@ static inline bool blk_do_io_stat(struct request *rq)
 	return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
 }
 
-static inline void blk_account_io_done(struct request *req, u64 now)
-{
-	/*
-	 * Account IO completion. flush_rq isn't accounted as a
-	 * normal IO on queueing nor completion. Accounting the
-	 * containing request is enough.
-	 */
-	if (blk_do_io_stat(req) && req->part &&
-	    !(req->rq_flags & RQF_FLUSH_SEQ))
-		__blk_account_io_done(req, now);
-}
-
-static inline void blk_account_io_start(struct request *req)
-{
-	if (blk_do_io_stat(req))
-		__blk_account_io_start(req);
-}
+void update_io_ticks(struct block_device *part, unsigned long now, bool end);
 
 static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 {