Commit fa2e39cb authored by Omar Sandoval, committed by Jens Axboe

blk-stat: use READ and WRITE instead of BLK_STAT_{READ,WRITE}

The stats buckets will become generic soon, so make the existing users
use the common READ and WRITE definitions instead of one internal to
blk-stat.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 0315b159
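For context: READ and WRITE are the kernel's long-standing generic data-direction constants (0 and 1), so a two-element array of per-direction stats can be indexed by direction directly, with no blk-stat-private enum. A minimal sketch of the convention the patch switches to:

/*
 * Sketch of the indexing convention this commit adopts; not a
 * verbatim excerpt. READ == 0 and WRITE == 1 kernel-wide.
 */
struct blk_rq_stat stat[2];

blk_stat_init(&stat[READ]);	/* formerly stat[BLK_STAT_READ] */
blk_stat_init(&stat[WRITE]);	/* formerly stat[BLK_STAT_WRITE] */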
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -333,17 +333,17 @@ static int hctx_stats_show(struct seq_file *m, void *v)
 	struct blk_mq_hw_ctx *hctx = m->private;
 	struct blk_rq_stat stat[2];
 
-	blk_stat_init(&stat[BLK_STAT_READ]);
-	blk_stat_init(&stat[BLK_STAT_WRITE]);
+	blk_stat_init(&stat[READ]);
+	blk_stat_init(&stat[WRITE]);
 
 	blk_hctx_stat_get(hctx, stat);
 
 	seq_puts(m, "read: ");
-	print_stat(m, &stat[BLK_STAT_READ]);
+	print_stat(m, &stat[READ]);
 	seq_puts(m, "\n");
 
 	seq_puts(m, "write: ");
-	print_stat(m, &stat[BLK_STAT_WRITE]);
+	print_stat(m, &stat[WRITE]);
 	seq_puts(m, "\n");
 	return 0;
 }
@@ -362,8 +362,8 @@ static ssize_t hctx_stats_write(struct file *file, const char __user *buf,
 	int i;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
-		blk_stat_init(&ctx->stat[BLK_STAT_READ]);
-		blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+		blk_stat_init(&ctx->stat[READ]);
+		blk_stat_init(&ctx->stat[WRITE]);
 	}
 	return count;
 }
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2040,8 +2040,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		spin_lock_init(&__ctx->lock);
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
-		blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
-		blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
+		blk_stat_init(&__ctx->stat[READ]);
+		blk_stat_init(&__ctx->stat[WRITE]);
 
 		/* If the cpu isn't online, the cpu is mapped to first hctx */
 		if (!cpu_online(i))
@@ -2769,10 +2769,10 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 	 * important on devices where the completion latencies are longer
 	 * than ~10 usec.
 	 */
-	if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
-		ret = (stat[BLK_STAT_READ].mean + 1) / 2;
-	else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
-		ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
+	if (req_op(rq) == REQ_OP_READ && stat[READ].nr_samples)
+		ret = (stat[READ].mean + 1) / 2;
+	else if (req_op(rq) == REQ_OP_WRITE && stat[WRITE].nr_samples)
+		ret = (stat[WRITE].mean + 1) / 2;
 
 	return ret;
 }
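The hunk above is the hybrid-polling heuristic: sleep for half the mean completion latency of the matching direction before spinning. A minimal sketch of that computation, assuming a stat array indexed by READ/WRITE as elsewhere in this commit (the kernel picks the index from req_op(rq); the helper name here is hypothetical):

/*
 * Sketch only, not the kernel function: half the observed mean
 * completion time, with +1 so a tiny nonzero mean still rounds up.
 * E.g. a mean read completion of 8000 nsec gives a ~4000 nsec sleep.
 */
static unsigned long poll_sleep_nsecs(const struct blk_rq_stat *stat, int dir)
{
	if (!stat[dir].nr_samples)
		return 0;			/* no samples yet: don't sleep */
	return (stat[dir].mean + 1) / 2;	/* dir is READ or WRITE */
}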
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -55,8 +55,8 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 	uint64_t latest = 0;
 	int i, j, nr;
 
-	blk_stat_init(&dst[BLK_STAT_READ]);
-	blk_stat_init(&dst[BLK_STAT_WRITE]);
+	blk_stat_init(&dst[READ]);
+	blk_stat_init(&dst[WRITE]);
 
 	nr = 0;
 	do {
@@ -64,16 +64,16 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
-				blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+				blk_stat_flush_batch(&ctx->stat[READ]);
+				blk_stat_flush_batch(&ctx->stat[WRITE]);
 
-				if (!ctx->stat[BLK_STAT_READ].nr_samples &&
-				    !ctx->stat[BLK_STAT_WRITE].nr_samples)
+				if (!ctx->stat[READ].nr_samples &&
+				    !ctx->stat[WRITE].nr_samples)
 					continue;
 
-				if (ctx->stat[BLK_STAT_READ].time > newest)
-					newest = ctx->stat[BLK_STAT_READ].time;
-				if (ctx->stat[BLK_STAT_WRITE].time > newest)
-					newest = ctx->stat[BLK_STAT_WRITE].time;
+				if (ctx->stat[READ].time > newest)
+					newest = ctx->stat[READ].time;
+				if (ctx->stat[WRITE].time > newest)
+					newest = ctx->stat[WRITE].time;
 			}
 		}
@@ -88,14 +88,14 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				if (ctx->stat[BLK_STAT_READ].time == newest) {
-					blk_stat_sum(&dst[BLK_STAT_READ],
-						     &ctx->stat[BLK_STAT_READ]);
+				if (ctx->stat[READ].time == newest) {
+					blk_stat_sum(&dst[READ],
+						     &ctx->stat[READ]);
 					nr++;
 				}
-				if (ctx->stat[BLK_STAT_WRITE].time == newest) {
-					blk_stat_sum(&dst[BLK_STAT_WRITE],
-						     &ctx->stat[BLK_STAT_WRITE]);
+				if (ctx->stat[WRITE].time == newest) {
+					blk_stat_sum(&dst[WRITE],
+						     &ctx->stat[WRITE]);
 					nr++;
 				}
 			}
@@ -106,7 +106,7 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 	 */
 	} while (!nr);
 
-	dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
+	dst[READ].time = dst[WRITE].time = latest;
 }
 
 void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
@@ -114,11 +114,11 @@ void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 	if (q->mq_ops)
 		blk_mq_stat_get(q, dst);
 	else {
-		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
-		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
-		memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
-		       sizeof(struct blk_rq_stat));
-		memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
-		       sizeof(struct blk_rq_stat));
+		blk_stat_flush_batch(&q->rq_stats[READ]);
+		blk_stat_flush_batch(&q->rq_stats[WRITE]);
+		memcpy(&dst[READ], &q->rq_stats[READ],
+		       sizeof(struct blk_rq_stat));
+		memcpy(&dst[WRITE], &q->rq_stats[WRITE],
+		       sizeof(struct blk_rq_stat));
 	}
 }
@@ -133,31 +133,29 @@ void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
 	uint64_t newest = 0;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
-		blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
-		blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+		blk_stat_flush_batch(&ctx->stat[READ]);
+		blk_stat_flush_batch(&ctx->stat[WRITE]);
 
-		if (!ctx->stat[BLK_STAT_READ].nr_samples &&
-		    !ctx->stat[BLK_STAT_WRITE].nr_samples)
+		if (!ctx->stat[READ].nr_samples &&
+		    !ctx->stat[WRITE].nr_samples)
 			continue;
 
-		if (ctx->stat[BLK_STAT_READ].time > newest)
-			newest = ctx->stat[BLK_STAT_READ].time;
-		if (ctx->stat[BLK_STAT_WRITE].time > newest)
-			newest = ctx->stat[BLK_STAT_WRITE].time;
+		if (ctx->stat[READ].time > newest)
+			newest = ctx->stat[READ].time;
+		if (ctx->stat[WRITE].time > newest)
+			newest = ctx->stat[WRITE].time;
 	}
 
 	if (!newest)
 		break;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
-		if (ctx->stat[BLK_STAT_READ].time == newest) {
-			blk_stat_sum(&dst[BLK_STAT_READ],
-				     &ctx->stat[BLK_STAT_READ]);
+		if (ctx->stat[READ].time == newest) {
+			blk_stat_sum(&dst[READ], &ctx->stat[READ]);
 			nr++;
 		}
-		if (ctx->stat[BLK_STAT_WRITE].time == newest) {
-			blk_stat_sum(&dst[BLK_STAT_WRITE],
-				     &ctx->stat[BLK_STAT_WRITE]);
+		if (ctx->stat[WRITE].time == newest) {
+			blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
 			nr++;
 		}
 	}
@@ -226,13 +224,13 @@ void blk_stat_clear(struct request_queue *q)
 		queue_for_each_hw_ctx(q, hctx, i) {
 			hctx_for_each_ctx(hctx, ctx, j) {
-				blk_stat_init(&ctx->stat[BLK_STAT_READ]);
-				blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+				blk_stat_init(&ctx->stat[READ]);
+				blk_stat_init(&ctx->stat[WRITE]);
 			}
 		}
 	} else {
-		blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
-		blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
+		blk_stat_init(&q->rq_stats[READ]);
+		blk_stat_init(&q->rq_stats[WRITE]);
 	}
 }
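With the private enum gone, any caller of blk_queue_stat_get() indexes the result with the generic READ/WRITE macros. A hypothetical caller, mirroring the sysfs read path further below (format specifiers assume the signed nr_samples and mean fields of struct blk_rq_stat):

/* Hypothetical usage sketch, not code from this commit: */
struct blk_rq_stat stat[2];

blk_queue_stat_get(q, stat);
pr_info("read:  samples=%d mean=%lld\n",
	stat[READ].nr_samples, stat[READ].mean);
pr_info("write: samples=%d mean=%lld\n",
	stat[WRITE].nr_samples, stat[WRITE].mean);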
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -15,11 +15,6 @@
 #define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SHIFT) - 1)
 #define BLK_STAT_MASK		~BLK_STAT_TIME_MASK
 
-enum {
-	BLK_STAT_READ	= 0,
-	BLK_STAT_WRITE,
-};
-
 void blk_stat_add(struct blk_rq_stat *, struct request *);
 void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
 void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -518,8 +518,8 @@ static ssize_t queue_stats_show(struct request_queue *q, char *page)
 
 	blk_queue_stat_get(q, stat);
 
-	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
-	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+	ret = print_stat(page, &stat[READ], "read :");
+	ret += print_stat(page + ret, &stat[WRITE], "write:");
 
 	return ret;
 }
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -255,8 +255,8 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
 	 * that it's writes impacting us, and not just some sole read on
 	 * a device that is in a lower power state.
 	 */
-	return stat[BLK_STAT_READ].nr_samples >= 1 &&
-		stat[BLK_STAT_WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+	return (stat[READ].nr_samples >= 1 &&
+		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
 }
 
 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
@@ -293,7 +293,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	 */
 	thislat = rwb_sync_issue_lat(rwb);
 	if (thislat > rwb->cur_win_nsec ||
-	    (thislat > rwb->min_lat_nsec && !stat[BLK_STAT_READ].nr_samples)) {
+	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
 		trace_wbt_lat(bdi, thislat);
 		return LAT_EXCEEDED;
 	}
@@ -308,7 +308,7 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	 * waited or still has writes in flights, consider us doing
 	 * just writes as well.
 	 */
-	if ((stat[BLK_STAT_WRITE].nr_samples && blk_stat_is_current(stat)) ||
+	if ((stat[WRITE].nr_samples && blk_stat_is_current(stat)) ||
 	    wb_recent_wait(rwb) || wbt_inflight(rwb))
 		return LAT_UNKNOWN_WRITES;
 	return LAT_UNKNOWN;
@@ -317,8 +317,8 @@ static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 	/*
 	 * If the 'min' latency exceeds our target, step down.
 	 */
-	if (stat[BLK_STAT_READ].min > rwb->min_lat_nsec) {
-		trace_wbt_lat(bdi, stat[BLK_STAT_READ].min);
+	if (stat[READ].min > rwb->min_lat_nsec) {
+		trace_wbt_lat(bdi, stat[READ].min);
 		trace_wbt_stat(bdi, stat);
 		return LAT_EXCEEDED;
 	}
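The writeback-throttling hunks above use the two buckets asymmetrically: the READ bucket carries the latency signal, while the WRITE bucket only distinguishes a write-heavy window from a window with no data. A condensed sketch of that decision, assuming blk-wbt's LAT_* values (including a LAT_OK value for the in-budget case, which is an assumption here) and the stat_sample_valid() helper from the first hunk:

/* Condensed illustration of the verdict logic, not the full function: */
static int latency_verdict(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	/* Too few samples to judge: guess based on write activity. */
	if (!stat_sample_valid(stat))
		return stat[WRITE].nr_samples ? LAT_UNKNOWN_WRITES
					      : LAT_UNKNOWN;

	/* The minimum read latency exceeding the target means back off. */
	if (stat[READ].min > rwb->min_lat_nsec)
		return LAT_EXCEEDED;

	return LAT_OK;	/* assumed name for the within-budget verdict */
}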