Commit f975d6bc authored by Theodore Ts'o's avatar Theodore Ts'o

ext4: teach ext4_statfs() to deal with clusters if bigalloc is enabled

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 24aaa8ef
...@@ -4580,16 +4580,34 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ...@@ -4580,16 +4580,34 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
return err; return err;
} }
/*
* Note: calculating the overhead so we can be compatible with
* historical BSD practice is quite difficult in the face of
* clusters/bigalloc. This is because multiple metadata blocks from
* different block groups can end up in the same allocation cluster.
* Calculating the exact overhead in the face of clustered allocation
* requires either O(all block bitmaps) in memory or O(number of block
* groups**2) in time. We will still calculate the superblock for
* older file systems --- and if we come across a bigalloc file
* system with zero in s_overhead_clusters the estimate will be close to
* correct especially for very large cluster sizes --- but for newer
* file systems, it's better to calculate this figure once at mkfs
* time, and store it in the superblock. If the superblock value is
* present (even for non-bigalloc file systems), we will use it.
*/
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{ {
struct super_block *sb = dentry->d_sb; struct super_block *sb = dentry->d_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es; struct ext4_super_block *es = sbi->s_es;
struct ext4_group_desc *gdp;
u64 fsid; u64 fsid;
s64 bfree; s64 bfree;
if (test_opt(sb, MINIX_DF)) { if (test_opt(sb, MINIX_DF)) {
sbi->s_overhead_last = 0; sbi->s_overhead_last = 0;
} else if (es->s_overhead_clusters) {
sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
} else if (sbi->s_blocks_last != ext4_blocks_count(es)) { } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
ext4_group_t i, ngroups = ext4_get_groups_count(sb); ext4_group_t i, ngroups = ext4_get_groups_count(sb);
ext4_fsblk_t overhead = 0; ext4_fsblk_t overhead = 0;
...@@ -4604,24 +4622,16 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) ...@@ -4604,24 +4622,16 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
* All of the blocks before first_data_block are * All of the blocks before first_data_block are
* overhead * overhead
*/ */
overhead = le32_to_cpu(es->s_first_data_block); overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
/* /*
* Add the overhead attributed to the superblock and * Add the overhead found in each block group
* block group descriptors. If the sparse superblocks
* feature is turned on, then not all groups have this.
*/ */
for (i = 0; i < ngroups; i++) { for (i = 0; i < ngroups; i++) {
overhead += ext4_bg_has_super(sb, i) + gdp = ext4_get_group_desc(sb, i, NULL);
ext4_bg_num_gdb(sb, i); overhead += ext4_num_overhead_clusters(sb, i, gdp);
cond_resched(); cond_resched();
} }
/*
* Every block group has an inode bitmap, a block
* bitmap, and an inode table.
*/
overhead += ngroups * (2 + sbi->s_itb_per_group);
sbi->s_overhead_last = overhead; sbi->s_overhead_last = overhead;
smp_wmb(); smp_wmb();
sbi->s_blocks_last = ext4_blocks_count(es); sbi->s_blocks_last = ext4_blocks_count(es);
...@@ -4629,7 +4639,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) ...@@ -4629,7 +4639,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = EXT4_SUPER_MAGIC; buf->f_type = EXT4_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize; buf->f_bsize = sb->s_blocksize;
buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last; buf->f_blocks = (ext4_blocks_count(es) -
EXT4_C2B(sbi, sbi->s_overhead_last));
bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) - bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter); percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
/* prevent underflow in case that few free space is available */ /* prevent underflow in case that few free space is available */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment