Commit 5e82a9a1 authored by Kent Overstreet; committed by Kent Overstreet

bcachefs: Write out fs usage consistently

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent fca1223c
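
In outline, the diff below splits filesystem usage accounting into a persistent part and a runtime part. struct bch_fs_usage loses online_reserved (and the gc_start[] marker) and becomes purely the counters that are written to the journal; the new struct bch_fs_usage_online pairs those counters with the runtime online_reserved. The filesystem now keeps c->usage_base, the totals as of the last journal write, plus two percpu accumulators c->usage[0] and c->usage[1] indexed by journal sequence parity; bch2_fs_usage_acc_to_base() folds an accumulator into the base when the journal entry of that parity is written out, so what goes to disk is always a consistent snapshot. A minimal single-threaded sketch of the double-buffering scheme (illustrative names only; the real code uses percpu counters and the new usage_lock seqcount, both omitted here):

#include <stdint.h>
#include <string.h>

struct usage { uint64_t data, nr_inodes; };

static struct usage usage_base;   /* totals as of the last journal write */
static struct usage usage_acc[2]; /* pending deltas, indexed by seq & 1 */

/* marking an update that will be journalled in sequence number @seq */
static void usage_add(uint64_t seq, int64_t data_delta)
{
	usage_acc[seq & 1].data += data_delta;
}

/* called when the journal entry of parity @idx is written out; afterwards
 * the base reflects exactly what that entry persisted */
static void usage_acc_to_base(unsigned idx)
{
	usage_base.data      += usage_acc[idx].data;
	usage_base.nr_inodes += usage_acc[idx].nr_inodes;
	memset(&usage_acc[idx], 0, sizeof(usage_acc[idx]));
}

/* current totals are the base plus both pending accumulators */
static struct usage usage_read(void)
{
	struct usage ret = usage_base;

	for (unsigned i = 0; i < 2; i++) {
		ret.data      += usage_acc[i].data;
		ret.nr_inodes += usage_acc[i].nr_inodes;
	}
	return ret;
}

This is presumably the "consistently" of the subject line: bch2_journal_super_entries_add_common() folds the appropriate accumulator first and then serializes usage_base, instead of summing live percpu counters mid-flight.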
@@ -646,11 +646,15 @@ struct bch_fs {
 	struct percpu_rw_semaphore	mark_lock;
 
+	seqcount_t			usage_lock;
+	struct bch_fs_usage		*usage_base;
 	struct bch_fs_usage __percpu	*usage[2];
+	struct bch_fs_usage __percpu	*usage_gc;
+	u64 __percpu			*online_reserved;
 
 	/* single element mempool: */
 	struct mutex			usage_scratch_lock;
-	struct bch_fs_usage		*usage_scratch;
+	struct bch_fs_usage_online	*usage_scratch;
 
 	/*
 	 * When we invalidate buckets, we use both the priority and the amount
...
@@ -490,8 +490,8 @@ static void bch2_gc_free(struct bch_fs *c)
 		ca->usage[1] = NULL;
 	}
 
-	free_percpu(c->usage[1]);
-	c->usage[1] = NULL;
+	free_percpu(c->usage_gc);
+	c->usage_gc = NULL;
 }
 
@@ -587,14 +587,16 @@ static int bch2_gc_done(struct bch_fs *c,
 		}
 	};
 
+	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+		bch2_fs_usage_acc_to_base(c, i);
+
 	bch2_dev_usage_from_buckets(c);
 
 	{
 		unsigned nr = fs_usage_u64s(c);
-		struct bch_fs_usage *dst = (void *)
-			bch2_acc_percpu_u64s((void *) c->usage[0], nr);
+		struct bch_fs_usage *dst = c->usage_base;
 		struct bch_fs_usage *src = (void *)
-			bch2_acc_percpu_u64s((void *) c->usage[1], nr);
+			bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
 
 		copy_fs_field(hidden,	"hidden");
 		copy_fs_field(btree,	"btree");
@@ -647,11 +649,11 @@ static int bch2_gc_start(struct bch_fs *c,
 	 */
 	gc_pos_set(c, gc_phase(GC_PHASE_START));
 
-	BUG_ON(c->usage[1]);
+	BUG_ON(c->usage_gc);
 
-	c->usage[1] = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
+	c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
 					 sizeof(u64), GFP_KERNEL);
-	if (!c->usage[1])
+	if (!c->usage_gc)
 		return -ENOMEM;
 
 	for_each_member_device(ca, c, i) {
@@ -770,11 +772,17 @@ int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
 		ret = -EINVAL;
 	}
 
-	percpu_down_write(&c->mark_lock);
+	if (!ret) {
+		bch2_journal_block(&c->journal);
 
-	if (!ret)
+		percpu_down_write(&c->mark_lock);
 		ret = bch2_gc_done(c, initial, metadata_only);
+
+		bch2_journal_unblock(&c->journal);
+	} else {
+		percpu_down_write(&c->mark_lock);
+	}
 
 	/* Indicates that gc is no longer in progress: */
 	__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
...
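
Two details of the gc change above are worth noting. bch2_gc_done() first folds both accumulators into usage_base, so gc compares a single consistent total (dst = c->usage_base) against its recomputed counters (src, summed from c->usage_gc); and bch2_gc() now blocks the journal around that comparison, presumably so a concurrent journal write cannot fold an accumulator while it runs. The shape of the new tail of bch2_gc(), annotated:

if (!ret) {
	bch2_journal_block(&c->journal);   /* journal writes fold c->usage[] */

	percpu_down_write(&c->mark_lock);  /* marking mutates c->usage[] */
	ret = bch2_gc_done(c, initial, metadata_only);

	bch2_journal_unblock(&c->journal);
} else {
	percpu_down_write(&c->mark_lock);
}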
@@ -1066,7 +1066,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 {
 	struct bch_fs *c = as->c;
 	struct btree *old = btree_node_root(c, b);
-	struct bch_fs_usage *fs_usage;
+	struct bch_fs_usage_online *fs_usage;
 
 	__bch2_btree_set_root_inmem(c, b);
 
@@ -1075,7 +1075,7 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	fs_usage = bch2_fs_usage_scratch_get(c);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
-			     true, 0, fs_usage, 0, 0);
+			     true, 0, &fs_usage->u, 0, 0);
 
 	if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
				     true, 0, NULL, 0,
@@ -1084,8 +1084,8 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	if (old && !btree_node_fake(old))
 		bch2_btree_node_free_index(as, NULL,
 					   bkey_i_to_s_c(&old->key),
-					   fs_usage);
+					   &fs_usage->u);
 
-	bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res);
+	bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res, 0);
 	bch2_fs_usage_scratch_put(c, fs_usage);
 
 	percpu_up_read(&c->mark_lock);
 
@@ -1160,7 +1160,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
					struct btree_node_iter *node_iter)
 {
 	struct bch_fs *c = as->c;
-	struct bch_fs_usage *fs_usage;
+	struct bch_fs_usage_online *fs_usage;
 	struct bkey_packed *k;
 	struct bkey tmp;
 
@@ -1171,7 +1171,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
 	fs_usage = bch2_fs_usage_scratch_get(c);
 	bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
-			     true, 0, fs_usage, 0, 0);
+			     true, 0, &fs_usage->u, 0, 0);
 
 	if (gc_visited(c, gc_pos_btree_node(b)))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
@@ -1188,9 +1188,9 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
 	if (k && !bkey_cmp_packed(b, k, &insert->k))
 		bch2_btree_node_free_index(as, b,
 					   bkey_disassemble(b, k, &tmp),
-					   fs_usage);
+					   &fs_usage->u);
 
-	bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res);
+	bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res, 0);
 	bch2_fs_usage_scratch_put(c, fs_usage);
 
 	percpu_up_read(&c->mark_lock);
 
@@ -1984,7 +1984,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 			bkey_copy(&b->key, &new_key->k_i);
 		}
 	} else {
-		struct bch_fs_usage *fs_usage;
+		struct bch_fs_usage_online *fs_usage;
 
 		BUG_ON(btree_node_root(c, b) != b);
 
@@ -1995,7 +1995,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 		fs_usage = bch2_fs_usage_scratch_get(c);
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
-				     true, 0, fs_usage, 0, 0);
+				     true, 0, &fs_usage->u, 0, 0);
 
 		if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
 			bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
					     true, 0, NULL, 0,
@@ -2003,8 +2003,8 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 		bch2_btree_node_free_index(as, NULL,
 					   bkey_i_to_s_c(&b->key),
-					   fs_usage);
+					   &fs_usage->u);
 
-		bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res);
+		bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res, 0);
 		bch2_fs_usage_scratch_put(c, fs_usage);
 
 		percpu_up_read(&c->mark_lock);
...
@@ -533,7 +533,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
				     struct btree_insert_entry **stopped_at)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_fs_usage *fs_usage = NULL;
+	struct bch_fs_usage_online *fs_usage = NULL;
 	struct btree_insert_entry *i;
 	struct btree_iter *linked;
 	int ret;
@@ -608,7 +608,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	if (likely(!(trans->flags & BTREE_INSERT_NOMARK))) {
 		trans_for_each_update_iter(trans, i)
-			bch2_mark_update(trans, i, fs_usage, 0);
+			bch2_mark_update(trans, i, &fs_usage->u, 0);
 		if (fs_usage)
 			bch2_trans_fs_usage_apply(trans, fs_usage);
...
This diff is collapsed.
@@ -219,12 +219,19 @@ static inline unsigned fs_usage_u64s(struct bch_fs *c)
 		READ_ONCE(c->replicas.nr);
 }
 
-void bch2_fs_usage_scratch_put(struct bch_fs *, struct bch_fs_usage *);
-struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *);
+void bch2_fs_usage_scratch_put(struct bch_fs *, struct bch_fs_usage_online *);
+struct bch_fs_usage_online *bch2_fs_usage_scratch_get(struct bch_fs *);
 
-struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *);
+u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);
 
-u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage *);
+struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);
+
+void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);
+
+void bch2_fs_usage_to_text(struct printbuf *,
+			   struct bch_fs *, struct bch_fs_usage_online *);
+
+u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);
 
 struct bch_fs_usage_short
 bch2_fs_usage_read_short(struct bch_fs *);
@@ -251,25 +258,23 @@ int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
 int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
		  bool, s64, struct bch_fs_usage *,
		  u64, unsigned);
-int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
-			struct disk_reservation *);
+int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage_online *,
+			struct disk_reservation *, unsigned);
 
 int bch2_mark_overwrite(struct btree_trans *, struct btree_iter *,
			struct bkey_s_c, struct bkey_i *,
			struct bch_fs_usage *, unsigned);
 int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
		     struct bch_fs_usage *, unsigned);
 
-void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage *);
+void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage_online *);
 
 /* disk reservations: */
 
-void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);
-
 static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
 {
-	if (res->sectors)
-		__bch2_disk_reservation_put(c, res);
+	this_cpu_sub(*c->online_reserved, res->sectors);
+	res->sectors = 0;
 }
 
 #define BCH_DISK_RESERVATION_NOFAIL	(1 << 0)
...
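
A consequence of moving online reserved space out of struct bch_fs_usage and into the plain percpu u64 c->online_reserved, visible above: releasing a disk reservation no longer needs the out-of-line __bch2_disk_reservation_put() or any locking, just a percpu subtraction. A userspace analogue of the new inline put (thread-local storage standing in for a percpu counter; illustration only):

#include <stdint.h>

struct disk_reservation { uint64_t sectors; };

/* stand-in for the kernel's percpu c->online_reserved */
static _Thread_local uint64_t online_reserved;

static inline void disk_reservation_put(struct disk_reservation *res)
{
	/* kernel: this_cpu_sub(*c->online_reserved, res->sectors) */
	online_reserved -= res->sectors;
	res->sectors = 0;
}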
@@ -52,7 +52,6 @@ struct bucket_array {
 struct bch_dev_usage {
 	u64			buckets[BCH_DATA_NR];
-	u64			buckets_alloc;
 	u64			buckets_ec;
 	u64			buckets_unavailable;
 
@@ -63,12 +62,6 @@ struct bch_dev_usage {
 struct bch_fs_usage {
 	/* all fields are in units of 512 byte sectors: */
 
-	u64			online_reserved;
-
-	/* fields after online_reserved are cleared/recalculated by gc: */
-	u64			gc_start[0];
-
 	u64			hidden;
 	u64			btree;
 	u64			data;
@@ -88,6 +81,11 @@ struct bch_fs_usage {
 	u64			replicas[];
 };
 
+struct bch_fs_usage_online {
+	u64			online_reserved;
+	struct bch_fs_usage	u;
+};
+
 struct bch_fs_usage_short {
 	u64			capacity;
 	u64			used;
...
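
This struct split is what lets usage be written out verbatim: every field of struct bch_fs_usage is now persistent state that maps one-to-one onto journal usage entries, while online_reserved lives only in memory. A toy serializer makes the point (the buffer and the cut-down struct here are invented for illustration; the real write-out happens field by field in bch2_journal_super_entries_add_common()):

#include <stdint.h>
#include <string.h>

struct bch_fs_usage { uint64_t hidden, btree, data, cached, reserved, nr_inodes; };

struct bch_fs_usage_online {
	uint64_t		online_reserved;
	struct bch_fs_usage	u;
};

static void usage_serialize(void *journal_buf, const struct bch_fs_usage_online *src)
{
	/* only the persistent part hits the disk ... */
	memcpy(journal_buf, &src->u, sizeof(src->u));
	/* ... src->online_reserved is deliberately never written */
}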
@@ -394,7 +394,7 @@ static long bch2_ioctl_usage(struct bch_fs *c,
 	}
 
 	{
-		struct bch_fs_usage *src;
+		struct bch_fs_usage_online *src;
 		struct bch_ioctl_fs_usage dst = {
			.capacity		= c->capacity,
		};
@@ -410,7 +410,7 @@ static long bch2_ioctl_usage(struct bch_fs *c,
 		for (i = 0; i < BCH_REPLICAS_MAX; i++) {
 			dst.persistent_reserved[i] =
-				src->persistent_reserved[i];
+				src->u.persistent_reserved[i];
 #if 0
 			for (j = 0; j < BCH_DATA_NR; j++)
 				dst.sectors[j][i] = src.replicas[i].data[j];
...
@@ -404,13 +404,11 @@ static int journal_replay_entry_early(struct bch_fs *c,
 		switch (entry->btree_id) {
 		case FS_USAGE_RESERVED:
 			if (entry->level < BCH_REPLICAS_MAX)
-				percpu_u64_set(&c->usage[0]->
-					       persistent_reserved[entry->level],
-					       le64_to_cpu(u->v));
+				c->usage_base->persistent_reserved[entry->level] =
+					le64_to_cpu(u->v);
 			break;
 		case FS_USAGE_INODES:
-			percpu_u64_set(&c->usage[0]->nr_inodes,
-				       le64_to_cpu(u->v));
+			c->usage_base->nr_inodes = le64_to_cpu(u->v);
 			break;
 		case FS_USAGE_KEY_VERSION:
 			atomic64_set(&c->key_version,
...
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
+#include "buckets.h"
 #include "journal.h"
 #include "replicas.h"
 #include "super-io.h"
@@ -235,20 +236,13 @@ bool bch2_replicas_marked(struct bch_fs *c,
 	return marked;
 }
 
-static void __replicas_table_update(struct bch_fs_usage __percpu *dst_p,
+static void __replicas_table_update(struct bch_fs_usage *dst,
				    struct bch_replicas_cpu *dst_r,
-				    struct bch_fs_usage __percpu *src_p,
+				    struct bch_fs_usage *src,
				    struct bch_replicas_cpu *src_r)
 {
-	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
-	struct bch_fs_usage *dst, *src = (void *)
-		bch2_acc_percpu_u64s((void *) src_p, src_nr);
 	int src_idx, dst_idx;
 
-	preempt_disable();
-	dst = this_cpu_ptr(dst_p);
-	preempt_enable();
-
 	*dst = *src;
 
 	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
@@ -263,42 +257,75 @@ static void __replicas_table_update(struct bch_fs_usage *dst,
 	}
 }
 
+static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
+					 struct bch_replicas_cpu *dst_r,
+					 struct bch_fs_usage __percpu *src_p,
+					 struct bch_replicas_cpu *src_r)
+{
+	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
+	struct bch_fs_usage *dst, *src = (void *)
+		bch2_acc_percpu_u64s((void *) src_p, src_nr);
+
+	preempt_disable();
+	dst = this_cpu_ptr(dst_p);
+	preempt_enable();
+
+	__replicas_table_update(dst, dst_r, src, src_r);
+}
+
 /*
  * Resize filesystem accounting:
  */
 static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
 {
-	struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
-	struct bch_fs_usage *new_scratch = NULL;
-	unsigned bytes = sizeof(struct bch_fs_usage) +
+	struct bch_fs_usage __percpu *new_usage[2];
+	struct bch_fs_usage_online *new_scratch = NULL;
+	struct bch_fs_usage __percpu *new_gc = NULL;
+	struct bch_fs_usage *new_base = NULL;
+	unsigned i, bytes = sizeof(struct bch_fs_usage) +
+		sizeof(u64) * new_r->nr;
+	unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
		sizeof(u64) * new_r->nr;
	int ret = -ENOMEM;
 
-	if (!(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
-						GFP_NOIO)) ||
-	    (c->usage[1] &&
-	     !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
-						 GFP_NOIO))) ||
-	    !(new_scratch = kmalloc(bytes, GFP_NOIO)))
+	memset(new_usage, 0, sizeof(new_usage));
+
+	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+		if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
					sizeof(u64), GFP_NOIO)))
			goto err;
 
-	if (c->usage[0])
-		__replicas_table_update(new_usage[0], new_r,
-					c->usage[0], &c->replicas);
-	if (c->usage[1])
-		__replicas_table_update(new_usage[1], new_r,
-					c->usage[1], &c->replicas);
+	if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
+	    !(new_scratch = kmalloc(scratch_bytes, GFP_NOIO)) ||
+	    (c->usage_gc &&
+	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO))))
+		goto err;
 
-	swap(c->usage[0], new_usage[0]);
-	swap(c->usage[1], new_usage[1]);
+	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+		if (c->usage[i])
+			__replicas_table_update_pcpu(new_usage[i], new_r,
+						     c->usage[i], &c->replicas);
+	if (c->usage_base)
+		__replicas_table_update(new_base, new_r,
+					c->usage_base, &c->replicas);
+	if (c->usage_gc)
+		__replicas_table_update_pcpu(new_gc, new_r,
+					     c->usage_gc, &c->replicas);
+
+	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+		swap(c->usage[i], new_usage[i]);
+	swap(c->usage_base, new_base);
 	swap(c->usage_scratch, new_scratch);
+	swap(c->usage_gc, new_gc);
 	swap(c->replicas, *new_r);
	ret = 0;
 err:
+	free_percpu(new_gc);
 	kfree(new_scratch);
 	free_percpu(new_usage[1]);
 	free_percpu(new_usage[0]);
+	kfree(new_base);
 	return ret;
 }
@@ -457,9 +484,7 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret)
 	lockdep_assert_held(&c->replicas_gc_lock);
 
 	mutex_lock(&c->sb_lock);
-
-	if (ret)
-		goto err;
+	percpu_down_write(&c->mark_lock);
 
 	/*
	 * this is kind of crappy; the replicas gc mechanism needs to be ripped
@@ -470,46 +495,39 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret)
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		struct bch_replicas_cpu n;
-		u64 v;
 
-		if (__replicas_has_entry(&c->replicas_gc, e))
-			continue;
-
-		v = percpu_u64_get(&c->usage[0]->replicas[i]);
-		if (!v)
-			continue;
-
+		if (!__replicas_has_entry(&c->replicas_gc, e) &&
+		    (c->usage_base->replicas[i] ||
+		     percpu_u64_get(&c->usage[0]->replicas[i]) ||
+		     percpu_u64_get(&c->usage[1]->replicas[i]))) {
			n = cpu_replicas_add_entry(&c->replicas_gc, e);
			if (!n.entries) {
				ret = -ENOSPC;
				goto err;
			}
 
-			percpu_down_write(&c->mark_lock);
			swap(n, c->replicas_gc);
-			percpu_up_write(&c->mark_lock);
-
			kfree(n.entries);
+		}
	}
 
	if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
		ret = -ENOSPC;
		goto err;
	}
 
-	bch2_write_super(c);
-
-	/* don't update in memory replicas until changes are persistent */
-err:
-	percpu_down_write(&c->mark_lock);
-	if (!ret)
	ret = replicas_table_update(c, &c->replicas_gc);
+err:
	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;
 
	percpu_up_write(&c->mark_lock);
 
+	if (!ret)
+		bch2_write_super(c);
+
	mutex_unlock(&c->sb_lock);
 
	return ret;
 }
@@ -576,7 +594,7 @@ int bch2_replicas_set_usage(struct bch_fs *c,
		BUG_ON(ret < 0);
	}
 
-	percpu_u64_set(&c->usage[0]->replicas[idx], sectors);
+	c->usage_base->replicas[idx] = sectors;
 
	return 0;
 }
...
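
replicas_table_update() above now has four usage tables to resize in lockstep: the two percpu accumulators (via the new __replicas_table_update_pcpu()), usage_base, and gc's copy. The remap step itself is unchanged: each per-replicas-entry counter moves to the slot its entry occupies in the new table, which is what __replicas_table_update()'s src_idx/dst_idx loop does. A self-contained toy version (entries reduced to an int id; real entries are variable-length device lists):

#include <stdint.h>

struct entry { int id; };

static int entry_lookup(const struct entry *tbl, int nr, struct entry e)
{
	for (int i = 0; i < nr; i++)
		if (tbl[i].id == e.id)
			return i;
	return -1;
}

/* copy each counter to the slot its entry occupies in the new table */
static void table_remap(uint64_t *dst, const struct entry *dst_tbl, int dst_nr,
			const uint64_t *src, const struct entry *src_tbl, int src_nr)
{
	for (int s = 0; s < src_nr; s++) {
		int d = entry_lookup(dst_tbl, dst_nr, src_tbl[s]);

		if (d >= 0)
			dst[d] = src[s];
	}
}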
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
+#include "buckets.h"
 #include "checksum.h"
 #include "disk_groups.h"
 #include "ec.h"
@@ -978,13 +979,16 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 	mutex_unlock(&c->btree_root_lock);
 
-	if (journal_seq)
-		return entry;
+	percpu_down_read(&c->mark_lock);
 
-	percpu_down_write(&c->mark_lock);
+	if (!journal_seq) {
+		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+			bch2_fs_usage_acc_to_base(c, i);
+	} else {
+		bch2_fs_usage_acc_to_base(c, journal_seq & 1);
+	}
 
 	{
-		u64 nr_inodes = percpu_u64_get(&c->usage[0]->nr_inodes);
 		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);
@@ -992,7 +996,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = FS_USAGE_INODES;
-		u->v		= cpu_to_le64(nr_inodes);
+		u->v		= cpu_to_le64(c->usage_base->nr_inodes);
 
		entry = vstruct_next(entry);
	}
@@ -1013,17 +1017,13 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);
-		u64 sectors = percpu_u64_get(&c->usage[0]->persistent_reserved[i]);
-
-		if (!sectors)
-			continue;
 
		memset(u, 0, sizeof(*u));
		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
		u->entry.type	= BCH_JSET_ENTRY_usage;
		u->entry.btree_id = FS_USAGE_RESERVED;
		u->entry.level	= i;
-		u->v		= sectors;
+		u->v		= cpu_to_le64(c->usage_base->persistent_reserved[i]);
 
		entry = vstruct_next(entry);
	}
@@ -1031,7 +1031,6 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
-		u64 sectors = percpu_u64_get(&c->usage[0]->replicas[i]);
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);
@@ -1039,14 +1038,14 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
					       sizeof(u64)) - 1;
		u->entry.type	= BCH_JSET_ENTRY_data_usage;
-		u->v		= cpu_to_le64(sectors);
+		u->v		= cpu_to_le64(c->usage_base->replicas[i]);
		unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
			      "embedded variable length struct");
 
		entry = vstruct_next(entry);
	}
 
-	percpu_up_write(&c->mark_lock);
+	percpu_up_read(&c->mark_lock);
 
	return entry;
 }
...
@@ -464,8 +464,11 @@ static void bch2_fs_free(struct bch_fs *c)
 	bch2_io_clock_exit(&c->io_clock[READ]);
 	bch2_fs_compress_exit(c);
 	percpu_free_rwsem(&c->mark_lock);
+	free_percpu(c->online_reserved);
 	kfree(c->usage_scratch);
+	free_percpu(c->usage[1]);
 	free_percpu(c->usage[0]);
+	kfree(c->usage_base);
 	free_percpu(c->pcpu);
 	mempool_exit(&c->btree_iters_pool);
 	mempool_exit(&c->btree_bounce_pool);
@@ -658,6 +661,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	seqcount_init(&c->gc_pos_lock);
 
+	seqcount_init(&c->usage_lock);
+
 	c->copy_gc_enabled		= 1;
 	c->rebalance.enabled		= 1;
 	c->promote_whole_extents	= true;
@@ -721,6 +726,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
			offsetof(struct btree_write_bio, wbio.bio)),
			BIOSET_NEED_BVECS) ||
 	    !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
+	    !(c->online_reserved = alloc_percpu(u64)) ||
 	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
					btree_bytes(c)) ||
 	    mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
@@ -1433,13 +1439,8 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
 static void dev_usage_clear(struct bch_dev *ca)
 {
 	struct bucket_array *buckets;
-	int cpu;
 
-	for_each_possible_cpu(cpu) {
-		struct bch_dev_usage *p =
-			per_cpu_ptr(ca->usage[0], cpu);
-		memset(p, 0, sizeof(*p));
-	}
+	percpu_memset(ca->usage[0], 0, sizeof(*ca->usage[0]));
 
 	down_read(&ca->bucket_lock);
 	buckets = bucket_array(ca);
...
@@ -235,43 +235,12 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
 static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
 {
 	struct printbuf out = _PBUF(buf, PAGE_SIZE);
-	struct bch_fs_usage *fs_usage = bch2_fs_usage_read(c);
-	unsigned i;
+	struct bch_fs_usage_online *fs_usage = bch2_fs_usage_read(c);
 
 	if (!fs_usage)
 		return -ENOMEM;
 
-	pr_buf(&out, "capacity:\t\t\t%llu\n", c->capacity);
-	pr_buf(&out, "hidden:\t\t\t\t%llu\n",
-	       fs_usage->hidden);
-	pr_buf(&out, "data:\t\t\t\t%llu\n",
-	       fs_usage->data);
-	pr_buf(&out, "cached:\t\t\t\t%llu\n",
-	       fs_usage->cached);
-	pr_buf(&out, "reserved:\t\t\t%llu\n",
-	       fs_usage->reserved);
-	pr_buf(&out, "nr_inodes:\t\t\t%llu\n",
-	       fs_usage->nr_inodes);
-	pr_buf(&out, "online reserved:\t\t%llu\n",
-	       fs_usage->online_reserved);
-
-	for (i = 0;
-	     i < ARRAY_SIZE(fs_usage->persistent_reserved);
-	     i++) {
-		pr_buf(&out, "%u replicas:\n", i + 1);
-		pr_buf(&out, "\treserved:\t\t%llu\n",
-		       fs_usage->persistent_reserved[i]);
-	}
-
-	for (i = 0; i < c->replicas.nr; i++) {
-		struct bch_replicas_entry *e =
-			cpu_replicas_entry(&c->replicas, i);
-
-		pr_buf(&out, "\t");
-		bch2_replicas_entry_to_text(&out, e);
-		pr_buf(&out, ":\t%llu\n", fs_usage->replicas[i]);
-	}
+	bch2_fs_usage_to_text(&out, c, fs_usage);
 
 	percpu_up_read(&c->mark_lock);
 
@@ -840,7 +809,6 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
		"free[RESERVE_NONE]:	%zu/%zu\n"
		"buckets:\n"
		"    capacity:	%llu\n"
-		"    alloc:	%llu\n"
		"    sb:	%llu\n"
		"    journal:	%llu\n"
		"    meta:	%llu\n"
@@ -867,7 +835,6 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
		fifo_used(&ca->free[RESERVE_MOVINGGC]),	ca->free[RESERVE_MOVINGGC].size,
		fifo_used(&ca->free[RESERVE_NONE]),	ca->free[RESERVE_NONE].size,
		ca->mi.nbuckets - ca->mi.first_bucket,
-		stats.buckets_alloc,
		stats.buckets[BCH_DATA_SB],
		stats.buckets[BCH_DATA_JOURNAL],
		stats.buckets[BCH_DATA_BTREE],
...
@@ -741,6 +741,14 @@ static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
 }
 
+static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		memset(per_cpu_ptr(p, cpu), c, bytes);
+}
+
 u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);
 
 #define cmp_int(l, r)		((l > r) - (l < r))
...
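
percpu_memset() is a small convenience helper; dev_usage_clear() in super.c above is its user in this commit, replacing an open-coded for_each_possible_cpu() loop. A userspace analogue, assuming one copy of the object per CPU at a fixed stride (the kernel's per_cpu_ptr() does the address arithmetic instead):

#include <string.h>

#define NR_CPUS 4

static void percpu_memset(void *p, int c, size_t bytes, size_t stride)
{
	/* clear every possible CPU's copy */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		memset((char *) p + (size_t) cpu * stride, c, bytes);
}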