Commit eca8b53a authored by Shaohua Li's avatar Shaohua Li Committed by Jens Axboe

blk-stat: delete useless code

Fix two issues:
- the per-cpu stat flush is unnecessary: nobody uses the per-cpu stats except
  to sum them into the global stat. We can do that calculation at summing
  time, so the separate flush just wastes cpu time.
- some fields are signed (s32/s64) for no apparent reason; the stored values
  are never negative, so make them unsigned (u32/u64).
Reviewed-by: default avatarOmar Sandoval <osandov@fb.com>
Signed-off-by: default avatarShaohua Li <shli@fb.com>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 53cfdc10
...@@ -11,8 +11,6 @@ ...@@ -11,8 +11,6 @@
#include "blk-mq.h" #include "blk-mq.h"
#include "blk.h" #include "blk.h"
#define BLK_RQ_STAT_BATCH 64
struct blk_queue_stats { struct blk_queue_stats {
struct list_head callbacks; struct list_head callbacks;
spinlock_t lock; spinlock_t lock;
...@@ -23,45 +21,21 @@ static void blk_stat_init(struct blk_rq_stat *stat) ...@@ -23,45 +21,21 @@ static void blk_stat_init(struct blk_rq_stat *stat)
{ {
stat->min = -1ULL; stat->min = -1ULL;
stat->max = stat->nr_samples = stat->mean = 0; stat->max = stat->nr_samples = stat->mean = 0;
stat->batch = stat->nr_batch = 0; stat->batch = 0;
}
static void blk_stat_flush_batch(struct blk_rq_stat *stat)
{
const s32 nr_batch = READ_ONCE(stat->nr_batch);
const s32 nr_samples = READ_ONCE(stat->nr_samples);
if (!nr_batch)
return;
if (!nr_samples)
stat->mean = div64_s64(stat->batch, nr_batch);
else {
stat->mean = div64_s64((stat->mean * nr_samples) +
stat->batch,
nr_batch + nr_samples);
}
stat->nr_samples += nr_batch;
stat->nr_batch = stat->batch = 0;
} }
/* src is a per-cpu stat, mean isn't initialized */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{ {
blk_stat_flush_batch(src);
if (!src->nr_samples) if (!src->nr_samples)
return; return;
dst->min = min(dst->min, src->min); dst->min = min(dst->min, src->min);
dst->max = max(dst->max, src->max); dst->max = max(dst->max, src->max);
if (!dst->nr_samples) dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
dst->mean = src->mean;
else {
dst->mean = div64_s64((src->mean * src->nr_samples) +
(dst->mean * dst->nr_samples),
dst->nr_samples + src->nr_samples); dst->nr_samples + src->nr_samples);
}
dst->nr_samples += src->nr_samples; dst->nr_samples += src->nr_samples;
} }
...@@ -69,13 +43,8 @@ static void __blk_stat_add(struct blk_rq_stat *stat, u64 value) ...@@ -69,13 +43,8 @@ static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{ {
stat->min = min(stat->min, value); stat->min = min(stat->min, value);
stat->max = max(stat->max, value); stat->max = max(stat->max, value);
if (stat->batch + value < stat->batch ||
stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
blk_stat_flush_batch(stat);
stat->batch += value; stat->batch += value;
stat->nr_batch++; stat->nr_samples++;
} }
void blk_stat_add(struct request *rq) void blk_stat_add(struct request *rq)
...@@ -84,7 +53,7 @@ void blk_stat_add(struct request *rq) ...@@ -84,7 +53,7 @@ void blk_stat_add(struct request *rq)
struct blk_stat_callback *cb; struct blk_stat_callback *cb;
struct blk_rq_stat *stat; struct blk_rq_stat *stat;
int bucket; int bucket;
s64 now, value; u64 now, value;
now = __blk_stat_time(ktime_to_ns(ktime_get())); now = __blk_stat_time(ktime_to_ns(ktime_get()));
if (now < blk_stat_time(&rq->issue_stat)) if (now < blk_stat_time(&rq->issue_stat))
......
...@@ -329,11 +329,10 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie) ...@@ -329,11 +329,10 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
} }
struct blk_rq_stat { struct blk_rq_stat {
s64 mean; u64 mean;
u64 min; u64 min;
u64 max; u64 max;
s32 nr_samples; u32 nr_samples;
s32 nr_batch;
u64 batch; u64 batch;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment