Commit 544ccc8d authored by Omar Sandoval, committed by Jens Axboe

block: get rid of struct blk_issue_stat

struct blk_issue_stat squashes three things into one u64:

- The time the driver started working on a request
- The original size of the request (for the io.low controller)
- Flags for writeback throttling

It turns out that on x86_64, we have a 4-byte hole in struct request
which we can fill with the non-timestamp fields from blk_issue_stat,
simplifying things quite a bit.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5238dcf4
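
For reference, here is a small standalone sketch (userspace C, illustrative only, not part of the patch) of the encoding the removed struct used, following the bit layout documented in the blk-stat.h hunk below: 4 flag bits, 12 size bits, and 48 time bits in one u64.

#include <stdint.h>
#include <stdio.h>

/* Same layout as the removed blk-stat.h macros: flags | size | time. */
#define BLK_STAT_RES_BITS	4
#define BLK_STAT_SIZE_BITS	12
#define BLK_STAT_RES_SHIFT	(64 - BLK_STAT_RES_BITS)		  /* 60 */
#define BLK_STAT_SIZE_SHIFT	(BLK_STAT_RES_SHIFT - BLK_STAT_SIZE_BITS) /* 48 */
#define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SIZE_SHIFT) - 1)

int main(void)
{
	uint64_t time_ns = 123456789;	/* issue timestamp, truncated to 48 bits */
	uint64_t sectors = 8;		/* request size, capped to 12 bits */
	uint64_t flags   = 0x3;		/* writeback-throttling flags */

	/*
	 * Pack all three into one u64, as blk_stat_set_issue() and
	 * wbt_track() did between them.
	 */
	uint64_t stat = (flags << BLK_STAT_RES_SHIFT) |
			(sectors << BLK_STAT_SIZE_SHIFT) |
			(time_ns & BLK_STAT_TIME_MASK);

	/* Every reader then needs its own shift/mask boilerplate. */
	printf("time=%llu size=%llu flags=%#llx\n",
	       (unsigned long long)(stat & BLK_STAT_TIME_MASK),
	       (unsigned long long)((stat >> BLK_STAT_SIZE_SHIFT) &
				    ((1ULL << BLK_STAT_SIZE_BITS) - 1)),
	       (unsigned long long)(stat >> BLK_STAT_RES_SHIFT));
	return 0;
}

The patch replaces this with three plainly typed request fields, so the masking disappears everywhere.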
@@ -2991,7 +2991,10 @@ void blk_start_request(struct request *req)
 	blk_dequeue_request(req);
 
 	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
-		blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
+		req->io_start_time_ns = ktime_get_ns();
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+		req->throtl_size = blk_rq_sectors(req);
+#endif
 		req->rq_flags |= RQF_STATS;
 		wbt_issue(req->q->rq_wb, req);
 	}
......
@@ -310,6 +310,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->rq_disk = NULL;
 	rq->part = NULL;
 	rq->start_time = jiffies;
+	rq->io_start_time_ns = 0;
 	rq->nr_phys_segments = 0;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
@@ -329,7 +330,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 #ifdef CONFIG_BLK_CGROUP
 	rq->rl = NULL;
 	set_start_time_ns(rq);
-	rq->io_start_time_ns = 0;
+	rq->cgroup_io_start_time_ns = 0;
 #endif
 
 	data->ctx->rq_dispatched[op_is_sync(op)]++;
@@ -669,7 +670,10 @@ void blk_mq_start_request(struct request *rq)
 	trace_block_rq_issue(q, rq);
 
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
-		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
+		rq->io_start_time_ns = ktime_get_ns();
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+		rq->throtl_size = blk_rq_sectors(rq);
+#endif
 		rq->rq_flags |= RQF_STATS;
 		wbt_issue(q->rq_wb, rq);
 	}
......
@@ -55,11 +55,8 @@ void blk_stat_add(struct request *rq)
 	int bucket;
 	u64 now, value;
 
-	now = __blk_stat_time(ktime_to_ns(ktime_get()));
-	if (now < blk_stat_time(&rq->issue_stat))
-		return;
-
-	value = now - blk_stat_time(&rq->issue_stat);
+	now = ktime_get_ns();
+	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
 
 	blk_throtl_stat_add(rq, value);
......
@@ -8,21 +8,6 @@
 #include <linux/rcupdate.h>
 #include <linux/timer.h>
 
-/*
- * from upper:
- * 4 bits: reserved for other usage
- * 12 bits: size
- * 48 bits: time
- */
-#define BLK_STAT_RES_BITS	4
-#define BLK_STAT_SIZE_BITS	12
-#define BLK_STAT_RES_SHIFT	(64 - BLK_STAT_RES_BITS)
-#define BLK_STAT_SIZE_SHIFT	(BLK_STAT_RES_SHIFT - BLK_STAT_SIZE_BITS)
-#define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SIZE_SHIFT) - 1)
-#define BLK_STAT_SIZE_MASK	\
-	(((1ULL << BLK_STAT_SIZE_BITS) - 1) << BLK_STAT_SIZE_SHIFT)
-#define BLK_STAT_RES_MASK	(~((1ULL << BLK_STAT_RES_SHIFT) - 1))
-
 /**
  * struct blk_stat_callback - Block statistics callback.
  *
@@ -82,34 +67,6 @@ void blk_free_queue_stats(struct blk_queue_stats *);
 
 void blk_stat_add(struct request *);
 
-static inline u64 __blk_stat_time(u64 time)
-{
-	return time & BLK_STAT_TIME_MASK;
-}
-
-static inline u64 blk_stat_time(struct blk_issue_stat *stat)
-{
-	return __blk_stat_time(stat->stat);
-}
-
-static inline sector_t blk_capped_size(sector_t size)
-{
-	return size & ((1ULL << BLK_STAT_SIZE_BITS) - 1);
-}
-
-static inline sector_t blk_stat_size(struct blk_issue_stat *stat)
-{
-	return (stat->stat & BLK_STAT_SIZE_MASK) >> BLK_STAT_SIZE_SHIFT;
-}
-
-static inline void blk_stat_set_issue(struct blk_issue_stat *stat,
-				      sector_t size)
-{
-	stat->stat = (stat->stat & BLK_STAT_RES_MASK) |
-		(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK) |
-		(((u64)blk_capped_size(size)) << BLK_STAT_SIZE_SHIFT);
-}
-
 /* record time/size info in request but not add a callback */
 void blk_stat_enable_accounting(struct request_queue *q);
......
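
With blk_issue_stat gone, the helpers above need no replacement; readers use rq->io_start_time_ns directly. A minimal sketch of the new pattern, mirroring the rewritten blk_stat_add() (the struct here is a hypothetical stand-in, not the kernel's struct request):

#include <stdint.h>

/* Hypothetical stand-in for the fields this patch adds to struct request. */
struct req {
	uint64_t io_start_time_ns;	/* set at issue time via ktime_get_ns() */
	uint16_t wbt_flags;		/* writeback-throttling flags */
	uint16_t throtl_size;		/* sectors, for the io.low controller */
};

/*
 * Mirrors the rewritten blk_stat_add(): clamp the latency to 0 rather
 * than bailing out if the clock appears to have gone backwards.
 */
static uint64_t issue_latency_ns(const struct req *rq, uint64_t now)
{
	return now >= rq->io_start_time_ns ? now - rq->io_start_time_ns : 0;
}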
@@ -2279,8 +2279,7 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
 	struct request_queue *q = rq->q;
 	struct throtl_data *td = q->td;
 
-	throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
-		req_op(rq), time_ns >> 10);
+	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
 }
 
 void blk_throtl_bio_endio(struct bio *bio)
......
@@ -31,22 +31,22 @@
 
 static inline void wbt_clear_state(struct request *rq)
 {
-	rq->issue_stat.stat &= ~BLK_STAT_RES_MASK;
+	rq->wbt_flags = 0;
 }
 
 static inline enum wbt_flags wbt_flags(struct request *rq)
 {
-	return (rq->issue_stat.stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
+	return rq->wbt_flags;
 }
 
 static inline bool wbt_is_tracked(struct request *rq)
 {
-	return (rq->issue_stat.stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
+	return rq->wbt_flags & WBT_TRACKED;
 }
 
 static inline bool wbt_is_read(struct request *rq)
 {
-	return (rq->issue_stat.stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
+	return rq->wbt_flags & WBT_READ;
 }
 
 enum {
@@ -657,7 +657,7 @@ void wbt_issue(struct rq_wb *rwb, struct request *rq)
 	 */
 	if (wbt_is_read(rq) && !rwb->sync_issue) {
 		rwb->sync_cookie = rq;
-		rwb->sync_issue = blk_stat_time(&rq->issue_stat);
+		rwb->sync_issue = rq->io_start_time_ns;
 	}
 }
@@ -746,8 +746,6 @@ int wbt_init(struct request_queue *q)
 	struct rq_wb *rwb;
 	int i;
 
-	BUILD_BUG_ON(WBT_NR_BITS > BLK_STAT_RES_BITS);
-
 	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
 	if (!rwb)
 		return -ENOMEM;
......
@@ -63,7 +63,7 @@ struct rq_wb {
 
 	struct blk_stat_callback *cb;
 
-	s64 sync_issue;
+	u64 sync_issue;
 	void *sync_cookie;
 
 	unsigned int wc;
@@ -90,7 +90,7 @@ static inline unsigned int wbt_inflight(struct rq_wb *rwb)
 
 static inline void wbt_track(struct request *rq, enum wbt_flags flags)
 {
-	rq->issue_stat.stat |= ((u64)flags) << BLK_STAT_RES_SHIFT;
+	rq->wbt_flags |= flags;
 }
 
 void __wbt_done(struct rq_wb *, enum wbt_flags);
......
@@ -485,11 +485,11 @@ static void kyber_completed_request(struct request *rq)
 	if (blk_stat_is_active(kqd->cb))
 		return;
 
-	now = __blk_stat_time(ktime_to_ns(ktime_get()));
-	if (now < blk_stat_time(&rq->issue_stat))
+	now = ktime_get_ns();
+	if (now < rq->io_start_time_ns)
 		return;
 
-	latency = now - blk_stat_time(&rq->issue_stat);
+	latency = now - rq->io_start_time_ns;
 
 	if (latency > target)
 		blk_stat_activate_msecs(kqd->cb, 10);
......
@@ -91,10 +91,6 @@ static inline bool blk_path_error(blk_status_t error)
 	return true;
 }
 
-struct blk_issue_stat {
-	u64 stat;
-};
-
 /*
  * From most significant bit:
  * 1 bit: reserved for other usage, see below
......
@@ -206,8 +206,18 @@ struct request {
 	struct gendisk *rq_disk;
 	struct hd_struct *part;
 	unsigned long start_time;
-	struct blk_issue_stat issue_stat;
-	/* Number of scatter-gather DMA addr+len pairs after
+	/* Time that I/O was submitted to the device. */
+	u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+	unsigned short wbt_flags;
+#endif
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+	unsigned short throtl_size;
+#endif
+
+	/*
+	 * Number of scatter-gather DMA addr+len pairs after
 	 * physical address coalescing is performed.
 	 */
 	unsigned short nr_phys_segments;
@@ -267,8 +277,8 @@ struct request {
 #ifdef CONFIG_BLK_CGROUP
 	struct request_list *rl;		/* rl this rq is alloced from */
 
-	unsigned long long start_time_ns;
-	unsigned long long io_start_time_ns;	/* when passed to hardware */
+	unsigned long long cgroup_start_time_ns;
+	unsigned long long cgroup_io_start_time_ns;	/* when passed to hardware */
 #endif
 };
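
The commit message's claim that the two unsigned shorts are free on x86_64 comes down to struct padding. A toy illustration of the idea (not the real struct request layout; exact offsets depend on the ABI):

#include <stdint.h>
#include <stdio.h>

struct with_hole {
	uint32_t a;	/* 4 bytes */
	/* 4-byte hole here: b needs 8-byte alignment */
	uint64_t b;
};

struct hole_filled {
	uint32_t a;
	uint16_t c;	/* the former padding now carries data... */
	uint16_t d;	/* ...at no size cost */
	uint64_t b;
};

int main(void)
{
	/* Both print 16 on typical LP64 targets. */
	printf("%zu %zu\n", sizeof(struct with_hole), sizeof(struct hole_filled));
	return 0;
}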
@@ -1797,25 +1807,25 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
 static inline void set_start_time_ns(struct request *req)
 {
 	preempt_disable();
-	req->start_time_ns = sched_clock();
+	req->cgroup_start_time_ns = sched_clock();
 	preempt_enable();
 }
 
 static inline void set_io_start_time_ns(struct request *req)
 {
 	preempt_disable();
-	req->io_start_time_ns = sched_clock();
+	req->cgroup_io_start_time_ns = sched_clock();
 	preempt_enable();
 }
 
 static inline uint64_t rq_start_time_ns(struct request *req)
 {
-	return req->start_time_ns;
+	return req->cgroup_start_time_ns;
 }
 
 static inline uint64_t rq_io_start_time_ns(struct request *req)
 {
-	return req->io_start_time_ns;
+	return req->cgroup_io_start_time_ns;
 }
 #else
 static inline void set_start_time_ns(struct request *req) {}
......