Commit 89fd25be authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Use x-macros for data types

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 912bdf17
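
The pattern this commit introduces: the list of data types is written once, as an x-macro, and each consumer re-defines `x()` to stamp out whatever it needs from that single list — the enum constants in bcachefs_format.h, the name strings in opts.c — so the two can no longer drift out of sync. A minimal standalone sketch of the technique, assembled from the hunks below (the `main()` driver is illustrative only, not part of the commit):

```c
#include <stdio.h>

/* Single source of truth: each entry is x(name, on-disk value). */
#define BCH_DATA_TYPES()	\
	x(none,		0)	\
	x(sb,		1)	\
	x(journal,	2)	\
	x(btree,	3)	\
	x(user,		4)	\
	x(cached,	5)

/* Consumer 1: paste each name after BCH_DATA_ to build the enum. */
enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
#undef x
	BCH_DATA_NR
};

/* Consumer 2: stringify the same names to build the name table. */
const char * const bch2_data_types[] = {
#define x(t, n) #t,
	BCH_DATA_TYPES()
#undef x
	NULL
};

int main(void)
{
	/* Illustrative driver: prints 0..5 with their generated names. */
	for (unsigned i = 0; i < BCH_DATA_NR; i++)
		printf("%u: %s\n", i, bch2_data_types[i]);
	return 0;
}
```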
@@ -53,10 +53,10 @@ static void pd_controllers_update(struct work_struct *work)
 		 * reclaimed by copy GC
 		 */
 		s64 fragmented = (bucket_to_sector(ca,
-					stats.buckets[BCH_DATA_USER] +
-					stats.buckets[BCH_DATA_CACHED]) -
-				  (stats.sectors[BCH_DATA_USER] +
-				   stats.sectors[BCH_DATA_CACHED])) << 9;
+					stats.buckets[BCH_DATA_user] +
+					stats.buckets[BCH_DATA_cached]) -
+				  (stats.sectors[BCH_DATA_user] +
+				   stats.sectors[BCH_DATA_cached])) << 9;
 		fragmented = max(0LL, fragmented);
......
@@ -534,7 +534,7 @@ static void get_buckets_from_writepoint(struct bch_fs *c,
 		if (*nr_effective < nr_replicas &&
 		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
 		    (ca->mi.durability ||
-		     (wp->type == BCH_DATA_USER && !*have_cache)) &&
+		     (wp->type == BCH_DATA_user && !*have_cache)) &&
 		    (ob->ec || !need_ec)) {
 			add_new_bucket(c, ptrs, devs_may_alloc,
 				       nr_effective, have_cache,
@@ -813,11 +813,11 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,

 	wp = writepoint_find(c, write_point.v);

-	if (wp->type == BCH_DATA_USER)
+	if (wp->type == BCH_DATA_user)
 		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

 	/* metadata may not allocate on cache devices: */
-	if (wp->type != BCH_DATA_USER)
+	if (wp->type != BCH_DATA_user)
 		have_cache = true;

 	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
@@ -856,7 +856,7 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,

 	/* Free buckets we didn't use: */
 	open_bucket_for_each(c, &wp->ptrs, ob, i)
-		open_bucket_free_unused(c, ob, wp->type == BCH_DATA_USER);
+		open_bucket_free_unused(c, ob, wp->type == BCH_DATA_user);

 	wp->ptrs = ptrs;
@@ -876,7 +876,7 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
 			ob_push(c, &ptrs, ob);
 		else
 			open_bucket_free_unused(c, ob,
-					wp->type == BCH_DATA_USER);
+					wp->type == BCH_DATA_user);
 	wp->ptrs = ptrs;

 	mutex_unlock(&wp->lock);
@@ -907,7 +907,7 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
 		struct bch_extent_ptr tmp = ob->ptr;

 		tmp.cached = !ca->mi.durability &&
-			wp->type == BCH_DATA_USER;
+			wp->type == BCH_DATA_user;

 		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
 		bch2_bkey_append_ptr(k, tmp);
@@ -956,12 +956,12 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
 		c->open_buckets_freelist = ob - c->open_buckets;
 	}

-	writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
-	writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
+	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
+	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);

 	for (wp = c->write_points;
 	     wp < c->write_points + c->write_points_nr; wp++) {
-		writepoint_init(wp, BCH_DATA_USER);
+		writepoint_init(wp, BCH_DATA_user);

 		wp->last_used = sched_clock();
 		wp->write_point = (unsigned long) wp;
......
@@ -1030,14 +1030,19 @@ LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);

 /* BCH_SB_FIELD_replicas: */

+#define BCH_DATA_TYPES()	\
+	x(none,		0)	\
+	x(sb,		1)	\
+	x(journal,	2)	\
+	x(btree,	3)	\
+	x(user,		4)	\
+	x(cached,	5)
+
 enum bch_data_type {
-	BCH_DATA_NONE		= 0,
-	BCH_DATA_SB		= 1,
-	BCH_DATA_JOURNAL	= 2,
-	BCH_DATA_BTREE		= 3,
-	BCH_DATA_USER		= 4,
-	BCH_DATA_CACHED		= 5,
-	BCH_DATA_NR		= 6,
+#define x(t, n) BCH_DATA_##t,
+	BCH_DATA_TYPES()
+#undef x
+	BCH_DATA_NR
 };

 struct bch_replicas_entry_v0 {
......
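
Worth noting: the enum consumer `x(t, n)` expands only the name token and discards `n`, so the constants still take implicit sequential values starting at zero. The numbers in `BCH_DATA_TYPES()` document the on-disk encoding rather than assign it, and `BCH_DATA_NR` comes out at 6, matching the old hand-written value.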
@@ -435,16 +435,16 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,

 		if (offset == BCH_SB_SECTOR)
 			mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
-					      BCH_DATA_SB, flags);
+					      BCH_DATA_sb, flags);

 		mark_metadata_sectors(c, ca, offset,
 				      offset + (1 << layout->sb_max_size_bits),
-				      BCH_DATA_SB, flags);
+				      BCH_DATA_sb, flags);
 	}

 	for (i = 0; i < ca->journal.nr; i++) {
 		b = ca->journal.buckets[i];
-		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
+		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
 					  ca->mi.bucket_size,
 					  gc_phase(GC_PHASE_SB), flags);
 	}
@@ -678,8 +678,8 @@ static int bch2_gc_done(struct bch_fs *c,
 			char buf[80];

 			if (metadata_only &&
-			    (e->data_type == BCH_DATA_USER ||
-			     e->data_type == BCH_DATA_CACHED))
+			    (e->data_type == BCH_DATA_user ||
+			     e->data_type == BCH_DATA_cached))
 				continue;

 			bch2_replicas_entry_to_text(&PBUF(buf), e);
@@ -764,8 +764,8 @@ static int bch2_gc_start(struct bch_fs *c,
 			d->gen_valid = s->gen_valid;

 			if (metadata_only &&
-			    (s->mark.data_type == BCH_DATA_USER ||
-			     s->mark.data_type == BCH_DATA_CACHED)) {
+			    (s->mark.data_type == BCH_DATA_user ||
+			     s->mark.data_type == BCH_DATA_cached)) {
 				d->_mark = s->mark;
 				d->_mark.owned_by_allocator = 0;
 			}
......
@@ -1231,7 +1231,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	set_btree_node_read_in_flight(b);

 	if (rb->have_ioref) {
-		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
+		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
 			     bio_sectors(bio));
 		bio_set_dev(bio, ca->disk_sb.bdev);
@@ -1701,7 +1701,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 	b->written += sectors_to_write;

 	/* XXX: submitting IO with btree locks held: */
-	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
+	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, &k.key);
 	return;
 err:
 	set_btree_node_noevict(b);
......
@@ -133,13 +133,13 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
 			cpu_replicas_entry(&c->replicas, i);

 		switch (e->data_type) {
-		case BCH_DATA_BTREE:
+		case BCH_DATA_btree:
 			usage->btree += usage->replicas[i];
 			break;
-		case BCH_DATA_USER:
+		case BCH_DATA_user:
 			usage->data += usage->replicas[i];
 			break;
-		case BCH_DATA_CACHED:
+		case BCH_DATA_cached:
 			usage->cached += usage->replicas[i];
 			break;
 		}
@@ -367,7 +367,7 @@ static inline int is_fragmented_bucket(struct bucket_mark m,
 				       struct bch_dev *ca)
 {
 	if (!m.owned_by_allocator &&
-	    m.data_type == BCH_DATA_USER &&
+	    m.data_type == BCH_DATA_user &&
 	    bucket_sectors_used(m))
 		return max_t(int, 0, (int) ca->mi.bucket_size -
 			     bucket_sectors_used(m));
@@ -382,7 +382,7 @@ static inline int bucket_stripe_sectors(struct bucket_mark m)
 static inline enum bch_data_type bucket_type(struct bucket_mark m)
 {
 	return m.cached_sectors && !m.dirty_sectors
-		? BCH_DATA_CACHED
+		? BCH_DATA_cached
 		: m.data_type;
 }
@@ -437,7 +437,7 @@ static inline void account_bucket(struct bch_fs_usage *fs_usage,
 				  enum bch_data_type type,
 				  int nr, s64 size)
 {
-	if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
+	if (type == BCH_DATA_sb || type == BCH_DATA_journal)
 		fs_usage->hidden += size;

 	dev_usage->buckets[type] += nr;
@@ -472,7 +472,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 	u->sectors[old.data_type] -= old.dirty_sectors;
 	u->sectors[new.data_type] += new.dirty_sectors;
-	u->sectors[BCH_DATA_CACHED] +=
+	u->sectors[BCH_DATA_cached] +=
 		(int) new.cached_sectors - (int) old.cached_sectors;
 	u->sectors_fragmented +=
 		is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
@@ -520,13 +520,13 @@ static inline int update_replicas(struct bch_fs *c,
 		return 0;

 	switch (r->data_type) {
-	case BCH_DATA_BTREE:
+	case BCH_DATA_btree:
 		fs_usage->btree += sectors;
 		break;
-	case BCH_DATA_USER:
+	case BCH_DATA_user:
 		fs_usage->data += sectors;
 		break;
-	case BCH_DATA_CACHED:
+	case BCH_DATA_cached:
 		fs_usage->cached += sectors;
 		break;
 	}
@@ -798,8 +798,8 @@ static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 	struct bucket_mark old, new;
 	bool overflow;

-	BUG_ON(data_type != BCH_DATA_SB &&
-	       data_type != BCH_DATA_JOURNAL);
+	BUG_ON(data_type != BCH_DATA_sb &&
+	       data_type != BCH_DATA_journal);

 	old = bucket_cmpxchg(g, new, ({
 		new.data_type = data_type;
@@ -830,8 +830,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 			       unsigned sectors, struct gc_pos pos,
 			       unsigned flags)
 {
-	BUG_ON(type != BCH_DATA_SB &&
-	       type != BCH_DATA_JOURNAL);
+	BUG_ON(type != BCH_DATA_sb &&
+	       type != BCH_DATA_journal);

 	preempt_disable();
@@ -1123,7 +1123,7 @@ static int bch2_mark_extent(struct bch_fs *c,
 	BUG_ON(!sectors);

 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		s64 disk_sectors = data_type == BCH_DATA_BTREE
+		s64 disk_sectors = data_type == BCH_DATA_btree
 			? sectors
 			: ptr_disk_sectors_delta(p, offset, sectors, flags);
@@ -1285,12 +1285,12 @@ static int bch2_mark_key_locked(struct bch_fs *c,
 			: -c->opts.btree_node_size;

 		ret = bch2_mark_extent(c, old, new, offset, sectors,
-				BCH_DATA_BTREE, fs_usage, journal_seq, flags);
+				BCH_DATA_btree, fs_usage, journal_seq, flags);
 		break;
 	case KEY_TYPE_extent:
 	case KEY_TYPE_reflink_v:
 		ret = bch2_mark_extent(c, old, new, offset, sectors,
-				BCH_DATA_USER, fs_usage, journal_seq, flags);
+				BCH_DATA_user, fs_usage, journal_seq, flags);
 		break;
 	case KEY_TYPE_stripe:
 		ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
@@ -1668,7 +1668,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
 	BUG_ON(!sectors);

 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		s64 disk_sectors = data_type == BCH_DATA_BTREE
+		s64 disk_sectors = data_type == BCH_DATA_btree
 			? sectors
 			: ptr_disk_sectors_delta(p, offset, sectors, flags);
@@ -1810,11 +1810,11 @@ int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
 			: -c->opts.btree_node_size;

 		return bch2_trans_mark_extent(trans, k, offset, sectors,
-					      flags, BCH_DATA_BTREE);
+					      flags, BCH_DATA_btree);
 	case KEY_TYPE_extent:
 	case KEY_TYPE_reflink_v:
 		return bch2_trans_mark_extent(trans, k, offset, sectors,
-					      flags, BCH_DATA_USER);
+					      flags, BCH_DATA_user);
 	case KEY_TYPE_inode:
 		d = replicas_deltas_realloc(trans, 0);
......
@@ -99,9 +99,9 @@ static inline enum bch_data_type ptr_data_type(const struct bkey *k,
 {
 	if (k->type == KEY_TYPE_btree_ptr ||
 	    k->type == KEY_TYPE_btree_ptr_v2)
-		return BCH_DATA_BTREE;
+		return BCH_DATA_btree;

-	return ptr->cached ? BCH_DATA_CACHED : BCH_DATA_USER;
+	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
 }

 static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,
......
@@ -1144,7 +1144,7 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 	h->redundancy	= redundancy;

 	rcu_read_lock();
-	h->devs = target_rw_devs(c, BCH_DATA_USER, target);
+	h->devs = target_rw_devs(c, BCH_DATA_user, target);

 	for_each_member_device_rcu(ca, c, i, &h->devs)
 		if (!ca->mi.durability)
......
@@ -193,7 +193,7 @@ void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
 			goto err;

 		err = "inconsistent";
-		if (mark.data_type != BCH_DATA_BTREE ||
+		if (mark.data_type != BCH_DATA_btree ||
 		    mark.dirty_sectors < c->opts.btree_node_size)
 			goto err;
 	}
@@ -288,7 +288,7 @@ void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
 				"key too stale: %i", stale);

 		bch2_fs_inconsistent_on(!stale &&
-			(mark.data_type != BCH_DATA_USER ||
+			(mark.data_type != BCH_DATA_user ||
 			 mark_sectors < disk_sectors), c,
 			"extent pointer not marked: %s:\n"
 			"type %u sectors %u < %u",
......
@@ -486,7 +486,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,

 		bio_set_dev(&n->bio, ca->disk_sb.bdev);

-		if (type != BCH_DATA_BTREE && unlikely(c->opts.no_data_io)) {
+		if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
 			bio_endio(&n->bio);
 			continue;
 		}
@@ -1128,7 +1128,7 @@ static void __bch2_write(struct closure *cl)
 		key_to_write = (void *) (op->insert_keys.keys_p +
 					 key_to_write_offset);

-		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_USER,
+		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
 					  key_to_write);
 	} while (ret);
@@ -2170,7 +2170,7 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 		goto out;
 	}

-	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_USER],
+	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
 		     bio_sectors(&rbio->bio));

 	bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
......
@@ -846,7 +846,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 		if (pos <= ja->cur_idx)
 			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

-		bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
+		bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
 					  ca->mi.bucket_size,
 					  gc_phase(GC_PHASE_SB),
 					  0);
@@ -1198,7 +1198,7 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
 		 test_bit(JOURNAL_REPLAY_DONE, &j->flags));

 	for_each_member_device_rcu(ca, c, iter,
-				   &c->rw_devs[BCH_DATA_JOURNAL]) {
+				   &c->rw_devs[BCH_DATA_journal]) {
 		struct journal_device *ja = &ca->journal;

 		if (!ja->nr)
......
@@ -660,7 +660,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)

 	for_each_member_device(ca, c, iter) {
 		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
-		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
+		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
 			continue;

 		if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
@@ -694,7 +694,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)
 		 * the devices - this is wrong:
 		 */

-		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);
+		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, i->devs);

 		if (!degraded &&
 		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
@@ -795,7 +795,7 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w,
 	rcu_read_lock();

 	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
-					  &c->rw_devs[BCH_DATA_JOURNAL]);
+					  &c->rw_devs[BCH_DATA_journal]);

 	__journal_write_alloc(j, w, &devs_sorted,
 			      sectors, &replicas, replicas_want);
@@ -913,7 +913,7 @@ static void journal_write_done(struct closure *cl)
 		goto err;
 	}

-	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);
+	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, devs);

 	if (bch2_mark_replicas(c, &replicas.e))
 		goto err;
@@ -1105,7 +1105,7 @@ void bch2_journal_write(struct closure *cl)
 			continue;
 		}

-		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
+		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
 			     sectors);

 		bio = ca->journal.bio;
......
@@ -70,7 +70,7 @@ static struct journal_space {
 	rcu_read_lock();

 	for_each_member_device_rcu(ca, c, i,
-				   &c->rw_devs[BCH_DATA_JOURNAL]) {
+				   &c->rw_devs[BCH_DATA_journal]) {
 		struct journal_device *ja = &ca->journal;
 		unsigned buckets_this_device, sectors_this_device;
@@ -139,7 +139,7 @@ void bch2_journal_space_available(struct journal *j)
 	rcu_read_lock();

 	for_each_member_device_rcu(ca, c, i,
-				   &c->rw_devs[BCH_DATA_JOURNAL]) {
+				   &c->rw_devs[BCH_DATA_journal]) {
 		struct journal_device *ja = &ca->journal;

 		if (!ja->nr)
@@ -618,7 +618,7 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
 		return ret;

 	mutex_lock(&c->replicas_gc_lock);
-	bch2_replicas_gc_start(c, 1 << BCH_DATA_JOURNAL);
+	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

 	seq = 0;
@@ -627,7 +627,7 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
 		struct bch_replicas_padded replicas;

 		seq = max(seq, journal_last_seq(j));
-		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL,
+		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
 					 journal_seq_pin(j, seq)->devs);
 		seq++;
......
@@ -516,7 +516,7 @@ static int __bch2_move_data(struct bch_fs *c,
 	bkey_on_stack_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);

-	stats->data_type = BCH_DATA_USER;
+	stats->data_type = BCH_DATA_user;
 	stats->btree_id	= btree_id;
 	stats->pos	= POS_MIN;
@@ -641,7 +641,7 @@ int bch2_move_data(struct bch_fs *c,
 	INIT_LIST_HEAD(&ctxt.reads);
 	init_waitqueue_head(&ctxt.wait);

-	stats->data_type = BCH_DATA_USER;
+	stats->data_type = BCH_DATA_user;

 	ret = __bch2_move_data(c, &ctxt, rate, wp, start, end,
 			       pred, arg, stats, BTREE_ID_EXTENTS) ?:
@@ -676,7 +676,7 @@ static int bch2_move_btree(struct bch_fs *c,

 	bch2_trans_init(&trans, c, 0, 0);

-	stats->data_type = BCH_DATA_BTREE;
+	stats->data_type = BCH_DATA_btree;

 	for (id = 0; id < BTREE_ID_NR; id++) {
 		stats->btree_id = id;
@@ -772,7 +772,7 @@ int bch2_data_job(struct bch_fs *c,

 	switch (op.op) {
 	case BCH_DATA_OP_REREPLICATE:
-		stats->data_type = BCH_DATA_JOURNAL;
+		stats->data_type = BCH_DATA_journal;
 		ret = bch2_journal_flush_device_pins(&c->journal, -1);

 		ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;
@@ -793,7 +793,7 @@ int bch2_data_job(struct bch_fs *c,
 		if (op.migrate.dev >= c->sb.nr_devices)
 			return -EINVAL;

-		stats->data_type = BCH_DATA_JOURNAL;
+		stats->data_type = BCH_DATA_journal;
 		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);

 		ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;
......
@@ -160,7 +160,7 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 		struct copygc_heap_entry e;

 		if (m.owned_by_allocator ||
-		    m.data_type != BCH_DATA_USER ||
+		    m.data_type != BCH_DATA_user ||
 		    !bucket_sectors_used(m) ||
 		    bucket_sectors_used(m) >= ca->mi.bucket_size)
 			continue;
......
@@ -45,12 +45,9 @@ const char * const bch2_str_hash_types[] = {
 };

 const char * const bch2_data_types[] = {
-	"none",
-	"sb",
-	"journal",
-	"btree",
-	"data",
-	"cached",
+#define x(t, n) #t,
+	BCH_DATA_TYPES()
+#undef x
 	NULL
 };
......
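
One behavior change hides in that hunk: the old table spelled the BCH_DATA_USER entry "data", while stringifying the token with `#t` now yields "user", so anything that prints names out of bch2_data_types[] reports "user" from here on.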
@@ -113,16 +113,16 @@ void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
 	switch (k.k->type) {
 	case KEY_TYPE_btree_ptr:
 	case KEY_TYPE_btree_ptr_v2:
-		e->data_type = BCH_DATA_BTREE;
+		e->data_type = BCH_DATA_btree;
 		extent_to_replicas(k, e);
 		break;
 	case KEY_TYPE_extent:
 	case KEY_TYPE_reflink_v:
-		e->data_type = BCH_DATA_USER;
+		e->data_type = BCH_DATA_user;
 		extent_to_replicas(k, e);
 		break;
 	case KEY_TYPE_stripe:
-		e->data_type = BCH_DATA_USER;
+		e->data_type = BCH_DATA_user;
 		stripe_to_replicas(k, e);
 		break;
 	}
@@ -137,7 +137,7 @@ void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
 	unsigned i;

 	BUG_ON(!data_type ||
-	       data_type == BCH_DATA_SB ||
+	       data_type == BCH_DATA_sb ||
 	       data_type >= BCH_DATA_NR);

 	e->data_type = data_type;
@@ -614,7 +614,7 @@ int bch2_replicas_gc2(struct bch_fs *c)
 		struct bch_replicas_entry *e =
 			cpu_replicas_entry(&c->replicas, i);

-		if (e->data_type == BCH_DATA_JOURNAL ||
+		if (e->data_type == BCH_DATA_journal ||
 		    c->usage_base->replicas[i] ||
 		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
 		    percpu_u64_get(&c->usage[1]->replicas[i]))
@@ -1040,13 +1040,13 @@ static bool have_enough_devs(struct replicas_status s,

 bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
 {
-	return (have_enough_devs(s, BCH_DATA_JOURNAL,
+	return (have_enough_devs(s, BCH_DATA_journal,
 				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
 				 flags & BCH_FORCE_IF_METADATA_LOST) &&
-		have_enough_devs(s, BCH_DATA_BTREE,
+		have_enough_devs(s, BCH_DATA_btree,
 				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
 				 flags & BCH_FORCE_IF_METADATA_LOST) &&
-		have_enough_devs(s, BCH_DATA_USER,
+		have_enough_devs(s, BCH_DATA_user,
 				 flags & BCH_FORCE_IF_DATA_DEGRADED,
 				 flags & BCH_FORCE_IF_DATA_LOST));
 }
@@ -1056,9 +1056,9 @@ int bch2_replicas_online(struct bch_fs *c, bool meta)
 	struct replicas_status s = bch2_replicas_status(c);

 	return (meta
-		? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
-		      s.replicas[BCH_DATA_BTREE].redundancy)
-		: s.replicas[BCH_DATA_USER].redundancy) + 1;
+		? min(s.replicas[BCH_DATA_journal].redundancy,
+		      s.replicas[BCH_DATA_btree].redundancy)
+		: s.replicas[BCH_DATA_user].redundancy) + 1;
 }

 unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
......
@@ -36,7 +36,7 @@ int bch2_mark_bkey_replicas(struct bch_fs *, struct bkey_s_c);
 static inline void bch2_replicas_entry_cached(struct bch_replicas_entry *e,
 					      unsigned dev)
 {
-	e->data_type	= BCH_DATA_CACHED;
+	e->data_type	= BCH_DATA_cached;
 	e->nr_devs	= 1;
 	e->nr_required	= 1;
 	e->devs[0]	= dev;
......
@@ -659,7 +659,7 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
 	bio->bi_private		= ca;
 	bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);

-	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_SB],
+	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
 		     bio_sectors(bio));

 	percpu_ref_get(&ca->io_ref);
@@ -685,7 +685,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
 		     roundup((size_t) vstruct_bytes(sb),
 			     bdev_logical_block_size(ca->disk_sb.bdev)));

-	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
+	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
 		     bio_sectors(bio));

 	percpu_ref_get(&ca->io_ref);
......
@@ -1076,7 +1076,7 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
 	init_rwsem(&ca->bucket_lock);

-	writepoint_init(&ca->copygc_write_point, BCH_DATA_USER);
+	writepoint_init(&ca->copygc_write_point, BCH_DATA_user);

 	bch2_dev_copygc_init(ca);
@@ -1207,7 +1207,7 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
 		return ret;

 	if (test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags) &&
-	    !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_SB])) {
+	    !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_sb])) {
 		mutex_lock(&c->sb_lock);
 		bch2_mark_dev_superblock(ca->fs, ca, 0);
 		mutex_unlock(&c->sb_lock);
......
@@ -868,18 +868,18 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 		fifo_used(&ca->free[RESERVE_MOVINGGC]),	ca->free[RESERVE_MOVINGGC].size,
 		fifo_used(&ca->free[RESERVE_NONE]),	ca->free[RESERVE_NONE].size,
 		ca->mi.nbuckets - ca->mi.first_bucket,
-		stats.buckets[BCH_DATA_SB],
-		stats.buckets[BCH_DATA_JOURNAL],
-		stats.buckets[BCH_DATA_BTREE],
-		stats.buckets[BCH_DATA_USER],
-		stats.buckets[BCH_DATA_CACHED],
+		stats.buckets[BCH_DATA_sb],
+		stats.buckets[BCH_DATA_journal],
+		stats.buckets[BCH_DATA_btree],
+		stats.buckets[BCH_DATA_user],
+		stats.buckets[BCH_DATA_cached],
 		stats.buckets_ec,
 		ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
-		stats.sectors[BCH_DATA_SB],
-		stats.sectors[BCH_DATA_JOURNAL],
-		stats.sectors[BCH_DATA_BTREE],
-		stats.sectors[BCH_DATA_USER],
-		stats.sectors[BCH_DATA_CACHED],
+		stats.sectors[BCH_DATA_sb],
+		stats.sectors[BCH_DATA_journal],
+		stats.sectors[BCH_DATA_btree],
+		stats.sectors[BCH_DATA_user],
+		stats.sectors[BCH_DATA_cached],
 		stats.sectors_ec,
 		stats.sectors_fragmented,
 		ca->copygc_threshold,
@@ -887,8 +887,8 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 		c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
 		BTREE_NODE_OPEN_BUCKET_RESERVE,
 		c->open_buckets_wait.list.first ? "waiting" : "empty",
-		nr[BCH_DATA_BTREE],
-		nr[BCH_DATA_USER],
+		nr[BCH_DATA_btree],
+		nr[BCH_DATA_user],
 		c->btree_reserve_cache_nr);
 }
......