Commit 575969a0 authored by Vivek Goyal's avatar Vivek Goyal Committed by Jens Axboe

blk-cgroup: Make 64bit per cpu stats safe on 32bit arch

Some of the stats are 64-bit, and updates to them are non-atomic on 32-bit
architectures. Use sequence counters on 32-bit arches to make reading
of the stats safe.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 5624a4e4
...@@ -400,14 +400,25 @@ void blkiocg_update_dispatch_stats(struct blkio_group *blkg, ...@@ -400,14 +400,25 @@ void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync) uint64_t bytes, bool direction, bool sync)
{ {
struct blkio_group_stats_cpu *stats_cpu; struct blkio_group_stats_cpu *stats_cpu;
unsigned long flags;
/*
* Disabling interrupts to provide mutual exclusion between two
* writes on same cpu. It probably is not needed for 64bit. Not
* optimizing that case yet.
*/
local_irq_save(flags);
stats_cpu = this_cpu_ptr(blkg->stats_cpu); stats_cpu = this_cpu_ptr(blkg->stats_cpu);
u64_stats_update_begin(&stats_cpu->syncp);
stats_cpu->sectors += bytes >> 9; stats_cpu->sectors += bytes >> 9;
blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED], blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
1, direction, sync); 1, direction, sync);
blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES], blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
bytes, direction, sync); bytes, direction, sync);
u64_stats_update_end(&stats_cpu->syncp);
local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats); EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
...@@ -622,15 +633,21 @@ static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, ...@@ -622,15 +633,21 @@ static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
{ {
int cpu; int cpu;
struct blkio_group_stats_cpu *stats_cpu; struct blkio_group_stats_cpu *stats_cpu;
uint64_t val = 0; u64 val = 0, tval;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
unsigned int start;
stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu); stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
do {
start = u64_stats_fetch_begin(&stats_cpu->syncp);
if (type == BLKIO_STAT_CPU_SECTORS) if (type == BLKIO_STAT_CPU_SECTORS)
val += stats_cpu->sectors; tval = stats_cpu->sectors;
else else
val += stats_cpu->stat_arr_cpu[type][sub_type]; tval = stats_cpu->stat_arr_cpu[type][sub_type];
} while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
val += tval;
} }
return val; return val;
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
*/ */
#include <linux/cgroup.h> #include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
enum blkio_policy_id { enum blkio_policy_id {
BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */ BLKIO_POLICY_PROP = 0, /* Proportional Bandwidth division */
...@@ -154,6 +155,7 @@ struct blkio_group_stats { ...@@ -154,6 +155,7 @@ struct blkio_group_stats {
struct blkio_group_stats_cpu { struct blkio_group_stats_cpu {
uint64_t sectors; uint64_t sectors;
uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL]; uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
struct u64_stats_sync syncp;
}; };
struct blkio_group { struct blkio_group {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment