Commit 09f3297a authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: kill s_alloc, use bch_data_type

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent a7c7a309
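For orientation, a minimal self-contained sketch of the accounting scheme this commit moves to (illustrative only, not code from the commit: the data_type enum values, MAX_REPLICAS, mark() and total_sectors() below are simplified stand-ins for enum bch_data_type, BCH_REPLICAS_MAX, bch2_mark_key() and __fs_usage_sum()). The idea is one u64 counter per (replica count, data type) pair, indexed directly by the data type instead of being translated through the removed enum s_alloc.

/*
 * Illustrative sketch only -- simplified stand-ins for the bcachefs types
 * touched by this commit.
 */
#include <stdint.h>
#include <stdio.h>

enum data_type { DT_SB, DT_JOURNAL, DT_BTREE, DT_USER, DT_NR };	/* stand-in for enum bch_data_type */
#define MAX_REPLICAS 4						/* stand-in for BCH_REPLICAS_MAX */

struct usage {
	struct {
		uint64_t data[DT_NR];		/* was data[S_ALLOC_NR] before this patch */
		uint64_t persistent_reserved;
	} s[MAX_REPLICAS];
};

/*
 * Account @sectors of @type data stored with @replicas copies (replicas >= 1);
 * no data-type -> s_alloc translation is needed any more:
 */
static void mark(struct usage *u, unsigned replicas, enum data_type type,
		 uint64_t sectors)
{
	u->s[replicas - 1].data[type] += sectors;
}

/*
 * Total sectors consumed, weighting each entry by its replica count,
 * mirroring the new loop in __fs_usage_sum():
 */
static uint64_t total_sectors(const struct usage *u)
{
	uint64_t sum = 0;
	unsigned i, j;

	for (i = 0; i < MAX_REPLICAS; i++) {
		uint64_t a = 0;

		for (j = 0; j < DT_NR; j++)
			a += u->s[i].data[j];

		sum += a * (i + 1);
	}
	return sum;
}

int main(void)
{
	struct usage u = { 0 };

	mark(&u, 2, DT_BTREE, 128);	/* 128 sectors of btree data, 2 replicas */
	mark(&u, 1, DT_USER, 512);	/* 512 sectors of user data, 1 replica */

	printf("total: %llu sectors\n",
	       (unsigned long long) total_sectors(&u));	/* 2*128 + 512 = 768 */
	return 0;
}

Because every data type gets its own slot, the verification and reporting code in the diff below can loop over all types generically instead of special-casing meta and dirty, which is the bulk of this change.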
@@ -184,7 +184,7 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
 	 */
 	replicas = bch2_extent_nr_dirty_ptrs(k);
 	if (replicas)
-		stats->s[replicas - 1].data[S_META] -= c->opts.btree_node_size;
+		stats->s[replicas - 1].data[BCH_DATA_BTREE] -= c->opts.btree_node_size;
 
 	/*
 	 * We're dropping @k from the btree, but it's still live until the
...
@@ -82,16 +82,14 @@ static void bch2_fs_stats_verify(struct bch_fs *c)
 {
 	struct bch_fs_usage stats =
 		__bch2_fs_usage_read(c);
-	unsigned i;
+	unsigned i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stats.s); i++) {
-		if ((s64) stats.s[i].data[S_META] < 0)
-			panic("replicas %u meta underflow: %lli\n",
-			      i + 1, stats.s[i].data[S_META]);
-
-		if ((s64) stats.s[i].data[S_DIRTY] < 0)
-			panic("replicas %u dirty underflow: %lli\n",
-			      i + 1, stats.s[i].data[S_DIRTY]);
+		for (j = 0; j < ARRAY_SIZE(stats.s[i].data); j++)
+			if ((s64) stats.s[i].data[j] < 0)
+				panic("replicas %u %s underflow: %lli\n",
+				      i + 1, bch_data_types[j],
+				      stats.s[i].data[j]);
 
 		if ((s64) stats.s[i].persistent_reserved < 0)
 			panic("replicas %u reserved underflow: %lli\n",
@@ -247,12 +245,16 @@ struct fs_usage_sum {
 static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats)
 {
 	struct fs_usage_sum sum = { 0 };
-	unsigned i;
+	unsigned i, j;
 
 	for (i = 0; i < ARRAY_SIZE(stats.s); i++) {
-		sum.data += (stats.s[i].data[S_META] +
-			     stats.s[i].data[S_DIRTY]) * (i + 1);
-		sum.reserved += stats.s[i].persistent_reserved * (i + 1);
+		u64 a = 0;
+
+		for (j = 0; j < ARRAY_SIZE(stats.s[i].data); j++)
+			a += stats.s[i].data[j];
+
+		sum.data += a * (i + 1);
+		sum.reserved += stats.s[i].persistent_reserved * (i + 1);
 	}
 
 	sum.reserved += stats.online_reserved;
@@ -641,8 +643,6 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
 		const struct bch_extent_ptr *ptr;
 		struct bch_extent_crc_unpacked crc;
-		enum s_alloc type = data_type == BCH_DATA_USER
-			? S_DIRTY : S_META;
 		unsigned replicas = 0;
 
 		BUG_ON(!sectors);
@@ -655,7 +655,7 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 
 		if (replicas) {
 			BUG_ON(replicas - 1 > ARRAY_SIZE(stats->s));
-			stats->s[replicas - 1].data[type] += sectors;
+			stats->s[replicas - 1].data[data_type] += sectors;
 		}
 		break;
 	}
...
@@ -168,18 +168,6 @@ static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
 
 /* Filesystem usage: */
 
-static inline enum bch_data_type s_alloc_to_data_type(enum s_alloc s)
-{
-	switch (s) {
-	case S_META:
-		return BCH_DATA_BTREE;
-	case S_DIRTY:
-		return BCH_DATA_USER;
-	default:
-		BUG();
-	}
-}
-
 struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *);
 struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
 void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
...
@@ -2,6 +2,7 @@
 #ifndef _BUCKETS_TYPES_H
 #define _BUCKETS_TYPES_H
 
+#include "bcachefs_format.h"
 #include "util.h"
 
 #define BUCKET_JOURNAL_SEQ_BITS	16
@@ -59,13 +60,6 @@ struct bch_dev_usage {
 	u64			sectors_fragmented;
 };
 
-/* kill, switch to bch_data_type? */
-enum s_alloc {
-	S_META,
-	S_DIRTY,
-	S_ALLOC_NR,
-};
-
 struct bch_fs_usage {
 	/* all fields are in units of 512 byte sectors: */
 	/* _uncompressed_ sectors: */
@@ -73,7 +67,7 @@ struct bch_fs_usage {
 	u64			available_cache;
 
 	struct {
-		u64		data[S_ALLOC_NR];
+		u64		data[BCH_DATA_NR];
 		u64		persistent_reserved;
 	} s[BCH_REPLICAS_MAX];
 };
...
@@ -406,9 +406,8 @@ static long bch2_ioctl_usage(struct bch_fs *c,
 		dst.persistent_reserved[i] =
 			src.s[i].persistent_reserved;
 
-		for (j = 0; j < S_ALLOC_NR; j++)
-			dst.sectors[s_alloc_to_data_type(j)][i] =
-				src.s[i].data[j];
+		for (j = 0; j < BCH_DATA_NR; j++)
+			dst.sectors[j][i] = src.s[i].data[j];
 	}
 
 	ret = copy_to_user(&user_arg->fs, &dst, sizeof(dst));
...
@@ -230,41 +230,34 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
 
 static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
 {
+	char *out = buf, *end = buf + PAGE_SIZE;
 	struct bch_fs_usage stats = bch2_fs_usage_read(c);
+	unsigned replicas, type;
+
+	out += scnprintf(out, end - out,
+			 "capacity:\t\t%llu\n",
+			 c->capacity);
+
+	for (replicas = 0; replicas < ARRAY_SIZE(stats.s); replicas++) {
+		out += scnprintf(out, end - out,
+				 "%u replicas:\n",
+				 replicas + 1);
+
+		for (type = BCH_DATA_SB; type < BCH_DATA_NR; type++)
+			out += scnprintf(out, end - out,
+					 "\t%s:\t\t%llu\n",
+					 bch2_data_types[type],
+					 stats.s[replicas].data[type]);
+
+		out += scnprintf(out, end - out,
+				 "\treserved:\t%llu\n",
+				 stats.s[replicas].persistent_reserved);
+	}
 
-	return scnprintf(buf, PAGE_SIZE,
-			 "capacity:\t\t%llu\n"
-			 "1 replicas:\n"
-			 "\tmeta:\t\t%llu\n"
-			 "\tdirty:\t\t%llu\n"
-			 "\treserved:\t%llu\n"
-			 "2 replicas:\n"
-			 "\tmeta:\t\t%llu\n"
-			 "\tdirty:\t\t%llu\n"
-			 "\treserved:\t%llu\n"
-			 "3 replicas:\n"
-			 "\tmeta:\t\t%llu\n"
-			 "\tdirty:\t\t%llu\n"
-			 "\treserved:\t%llu\n"
-			 "4 replicas:\n"
-			 "\tmeta:\t\t%llu\n"
-			 "\tdirty:\t\t%llu\n"
-			 "\treserved:\t%llu\n"
+	out += scnprintf(out, end - out,
 			 "online reserved:\t%llu\n",
-			 c->capacity,
-			 stats.s[0].data[S_META],
-			 stats.s[0].data[S_DIRTY],
-			 stats.s[0].persistent_reserved,
-			 stats.s[1].data[S_META],
-			 stats.s[1].data[S_DIRTY],
-			 stats.s[1].persistent_reserved,
-			 stats.s[2].data[S_META],
-			 stats.s[2].data[S_DIRTY],
-			 stats.s[2].persistent_reserved,
-			 stats.s[3].data[S_META],
-			 stats.s[3].data[S_DIRTY],
-			 stats.s[3].persistent_reserved,
			 stats.online_reserved);
+
+	return out - buf;
 }
 
 static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
...