Commit bdc72765 authored by Linus Torvalds

Merge tag 'bcachefs-2024-10-14' of git://evilpiepirate.org/bcachefs

Pull bcachefs fixes from Kent Overstreet:

 - New metadata version inode_has_child_snapshots

   This fixes bugs with handling of unlinked inodes + snapshots, in
   particular when an inode is reattached after taking a snapshot;
   deleted inodes now get correctly cleaned up across snapshots.

 - Disk accounting rewrite fixes
     - validation fixes for when a device has been removed
     - fix journal replay failing with "journal_reclaim_would_deadlock"

 - Some more small fixes for erasure coding + device removal

 - Assorted small syzbot fixes

* tag 'bcachefs-2024-10-14' of git://evilpiepirate.org/bcachefs: (27 commits)
  bcachefs: Fix sysfs warning in fstests generic/730,731
  bcachefs: Handle race between stripe reuse, invalidate_stripe_to_dev
  bcachefs: Fix kasan splat in new_stripe_alloc_buckets()
  bcachefs: Add missing validation for bch_stripe.csum_granularity_bits
  bcachefs: Fix missing bounds checks in bch2_alloc_read()
  bcachefs: fix uaf in bch2_dio_write_done()
  bcachefs: Improve check_snapshot_exists()
  bcachefs: Fix bkey_nocow_lock()
  bcachefs: Fix accounting replay flags
  bcachefs: Fix invalid shift in member_to_text()
  bcachefs: Fix bch2_have_enough_devs() for BCH_SB_MEMBER_INVALID
  bcachefs: __wait_for_freeing_inode: Switch to wait_bit_queue_entry
  bcachefs: Check if stuck in journal_res_get()
  closures: Add closure_wait_event_timeout()
  bcachefs: Fix state lock involved deadlock
  bcachefs: Fix NULL pointer dereference in bch2_opt_to_text
  bcachefs: Release transaction before wake up
  bcachefs: add check for btree id against max in try read node
  bcachefs: Disk accounting device validation fixes
  bcachefs: bch2_inode_or_descendents_is_open()
  ...
parents eca631b8 5e3b7232
@@ -639,6 +639,16 @@ int bch2_alloc_read(struct bch_fs *c)
 			continue;
 		}
 
+		if (k.k->p.offset < ca->mi.first_bucket) {
+			bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket));
+			continue;
+		}
+
+		if (k.k->p.offset >= ca->mi.nbuckets) {
+			bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+			continue;
+		}
+
 		struct bch_alloc_v4 a;
 		*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
 		0;
...
@@ -678,7 +678,8 @@ struct bch_sb_field_ext {
 	x(disk_accounting_v2,		BCH_VERSION(1,  9))	\
 	x(disk_accounting_v3,		BCH_VERSION(1, 10))	\
 	x(disk_accounting_inum,		BCH_VERSION(1, 11))	\
-	x(rebalance_work_acct_fix,	BCH_VERSION(1, 12))
+	x(rebalance_work_acct_fix,	BCH_VERSION(1, 12))	\
+	x(inode_has_child_snapshots,	BCH_VERSION(1, 13))
 
 enum bcachefs_metadata_version {
 	bcachefs_metadata_version_min = 9,
...
@@ -1224,17 +1224,20 @@ int bch2_gc_gens(struct bch_fs *c)
 	u64 b, start_time = local_clock();
 	int ret;
 
-	/*
-	 * Ideally we would be using state_lock and not gc_gens_lock here, but that
-	 * introduces a deadlock in the RO path - we currently take the state
-	 * lock at the start of going RO, thus the gc thread may get stuck:
-	 */
 	if (!mutex_trylock(&c->gc_gens_lock))
 		return 0;
 
 	trace_and_count(c, gc_gens_start, c);
-	down_read(&c->state_lock);
+
+	/*
+	 * We have to use trylock here. Otherwise, we would
+	 * introduce a deadlock in the RO path - we take the
+	 * state lock at the start of going RO.
+	 */
+	if (!down_read_trylock(&c->state_lock)) {
+		mutex_unlock(&c->gc_gens_lock);
+		return 0;
+	}
 
 	for_each_member_device(c, ca) {
 		struct bucket_gens *gens = bucket_gens(ca);
...
@@ -1838,10 +1838,11 @@ static void btree_node_write_done(struct bch_fs *c, struct btree *b)
 	struct btree_trans *trans = bch2_trans_get(c);
 
 	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
-	__btree_node_write_done(c, b);
-	six_unlock_read(&b->c.lock);
+
+	/* we don't need transaction context anymore after we got the lock. */
 	bch2_trans_put(trans);
+	__btree_node_write_done(c, b);
+	six_unlock_read(&b->c.lock);
 }
 
 static void btree_node_write_work(struct work_struct *work)
...
@@ -2381,9 +2381,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 		else
 			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
 
-		if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
-			     ? bkey_gt(iter_pos, end)
-			     : bkey_ge(iter_pos, end)))
+		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_gt(iter_pos, end) :
+			     iter->flags & BTREE_ITER_is_extents	? bkey_ge(iter_pos, end) :
+									  bkey_gt(iter_pos, end)))
 			goto end;
 
 		break;
...
@@ -857,6 +857,14 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
 	for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\
 					  SPOS_MAX, _flags, _k, _ret)
 
+#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id,	\
+					     _start, _flags, _k, _ret)	\
+	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
+				  (_start), (_flags));			\
+	     (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags),	\
+	     !((_ret) = bkey_err(_k)) && (_k).k;			\
+	     bch2_btree_iter_rewind(&(_iter)))
+
 #define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
 	for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
...
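For reference, a minimal usage sketch of the new reverse iterator (the btree ID, start position, and loop body here are illustrative, not from this series). Like the other _norestart variants, it leaves transaction-restart handling and the final iterator cleanup to the caller:

	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes,
					     SPOS(0, inum, snapshot),
					     BTREE_ITER_all_snapshots, k, ret) {
		/* keys are visited at decreasing positions */
		if (k.k->p.offset != inum)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);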
@@ -171,6 +171,9 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
 	if (BTREE_NODE_LEVEL(bn) >= BTREE_MAX_DEPTH)
 		return;
 
+	if (BTREE_NODE_ID(bn) >= BTREE_ID_NR_MAX)
+		return;
+
 	rcu_read_lock();
 	struct found_btree_node n = {
 		.btree_id	= BTREE_NODE_ID(bn),
...
@@ -80,6 +80,7 @@ static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struc
 			if (ptr2 == ptr)
 				break;
 
+			ca = bch2_dev_have_ref(c, ptr2->dev);
 			bucket = PTR_BUCKET_POS(ca, ptr2);
 			bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
 		}
...
@@ -242,6 +242,14 @@ void bch2_accounting_swab(struct bkey_s k)
 		*p = swab64(*p);
 }
 
+static inline void __accounting_to_replicas(struct bch_replicas_entry_v1 *r,
+					    struct disk_accounting_pos acc)
+{
+	unsafe_memcpy(r, &acc.replicas,
+		      replicas_entry_bytes(&acc.replicas),
+		      "variable length struct");
+}
+
 static inline bool accounting_to_replicas(struct bch_replicas_entry_v1 *r, struct bpos p)
 {
 	struct disk_accounting_pos acc_k;
@@ -249,9 +257,7 @@ static inline bool accounting_to_replicas(struct bch_replicas_entry_v1 *r, struc
 
 	switch (acc_k.type) {
 	case BCH_DISK_ACCOUNTING_replicas:
-		unsafe_memcpy(r, &acc_k.replicas,
-			      replicas_entry_bytes(&acc_k.replicas),
-			      "variable length struct");
+		__accounting_to_replicas(r, acc_k);
 		return true;
 	default:
 		return false;
@@ -608,6 +614,81 @@ static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k)
 	return ret;
 }
 
+static int bch2_disk_accounting_validate_late(struct btree_trans *trans,
+					      struct disk_accounting_pos acc,
+					      u64 *v, unsigned nr)
+{
+	struct bch_fs *c = trans->c;
+	struct printbuf buf = PRINTBUF;
+	int ret = 0, invalid_dev = -1;
+
+	switch (acc.type) {
+	case BCH_DISK_ACCOUNTING_replicas: {
+		struct bch_replicas_padded r;
+		__accounting_to_replicas(&r.e, acc);
+
+		for (unsigned i = 0; i < r.e.nr_devs; i++)
+			if (r.e.devs[i] != BCH_SB_MEMBER_INVALID &&
+			    !bch2_dev_exists(c, r.e.devs[i])) {
+				invalid_dev = r.e.devs[i];
+				goto invalid_device;
+			}
+
+		/*
+		 * All replicas entry checks except for invalid device are done
+		 * in bch2_accounting_validate
+		 */
+		BUG_ON(bch2_replicas_entry_validate(&r.e, c, &buf));
+
+		if (fsck_err_on(!bch2_replicas_marked_locked(c, &r.e),
+				trans, accounting_replicas_not_marked,
+				"accounting not marked in superblock replicas\n %s",
+				(printbuf_reset(&buf),
+				 bch2_accounting_key_to_text(&buf, &acc),
+				 buf.buf))) {
+			/*
+			 * We're not RW yet and still single threaded, dropping
+			 * and retaking lock is ok:
+			 */
+			percpu_up_write(&c->mark_lock);
+			ret = bch2_mark_replicas(c, &r.e);
+			if (ret)
+				goto fsck_err;
+			percpu_down_write(&c->mark_lock);
+		}
+		break;
+	}
+
+	case BCH_DISK_ACCOUNTING_dev_data_type:
+		if (!bch2_dev_exists(c, acc.dev_data_type.dev)) {
+			invalid_dev = acc.dev_data_type.dev;
+			goto invalid_device;
+		}
+		break;
+	}
+
+fsck_err:
+	printbuf_exit(&buf);
+	return ret;
+invalid_device:
+	if (fsck_err(trans, accounting_to_invalid_device,
+		     "accounting entry points to invalid device %i\n %s",
+		     invalid_dev,
+		     (printbuf_reset(&buf),
+		      bch2_accounting_key_to_text(&buf, &acc),
+		      buf.buf))) {
+		for (unsigned i = 0; i < nr; i++)
+			v[i] = -v[i];
+
+		ret = commit_do(trans, NULL, NULL, 0,
+				bch2_disk_accounting_mod(trans, &acc, v, nr, false)) ?:
+			-BCH_ERR_remove_disk_accounting_entry;
+	} else {
+		ret = -BCH_ERR_remove_disk_accounting_entry;
+	}
+	goto fsck_err;
+}
+
 /*
  * At startup time, initialize the in memory accounting from the btree (and
  * journal)
@@ -666,44 +747,42 @@ int bch2_accounting_read(struct bch_fs *c)
 	}
 	keys->gap = keys->nr = dst - keys->data;
 
-	percpu_down_read(&c->mark_lock);
-	for (unsigned i = 0; i < acc->k.nr; i++) {
-		u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
-		bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);
+	percpu_down_write(&c->mark_lock);
+	unsigned i = 0;
+	while (i < acc->k.nr) {
+		unsigned idx = inorder_to_eytzinger0(i, acc->k.nr);
 
-		if (bch2_is_zero(v, sizeof(v[0]) * acc->k.data[i].nr_counters))
-			continue;
+		struct disk_accounting_pos acc_k;
+		bpos_to_disk_accounting_pos(&acc_k, acc->k.data[idx].pos);
 
-		struct bch_replicas_padded r;
-		if (!accounting_to_replicas(&r.e, acc->k.data[i].pos))
-			continue;
+		u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
+		bch2_accounting_mem_read_counters(acc, idx, v, ARRAY_SIZE(v), false);
 
 		/*
-		 * If the replicas entry is invalid it'll get cleaned up by
-		 * check_allocations:
+		 * If the entry counters are zeroed, it should be treated as
+		 * nonexistent - it might point to an invalid device.
+		 *
+		 * Remove it, so that if it's re-added it gets re-marked in the
+		 * superblock:
 		 */
-		if (bch2_replicas_entry_validate(&r.e, c, &buf))
+		ret = bch2_is_zero(v, sizeof(v[0]) * acc->k.data[idx].nr_counters)
+			? -BCH_ERR_remove_disk_accounting_entry
+			: bch2_disk_accounting_validate_late(trans, acc_k,
+							     v, acc->k.data[idx].nr_counters);
+
+		if (ret == -BCH_ERR_remove_disk_accounting_entry) {
+			free_percpu(acc->k.data[idx].v[0]);
+			free_percpu(acc->k.data[idx].v[1]);
+			darray_remove_item(&acc->k, &acc->k.data[idx]);
+			eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
+					accounting_pos_cmp, NULL);
+			ret = 0;
 			continue;
+		}
 
-		struct disk_accounting_pos k;
-		bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos);
-
-		if (fsck_err_on(!bch2_replicas_marked_locked(c, &r.e),
-				trans, accounting_replicas_not_marked,
-				"accounting not marked in superblock replicas\n %s",
-				(printbuf_reset(&buf),
-				 bch2_accounting_key_to_text(&buf, &k),
-				 buf.buf))) {
-			/*
-			 * We're not RW yet and still single threaded, dropping
-			 * and retaking lock is ok:
-			 */
-			percpu_up_read(&c->mark_lock);
-			ret = bch2_mark_replicas(c, &r.e);
-			if (ret)
-				goto fsck_err;
-			percpu_down_read(&c->mark_lock);
-		}
+		if (ret)
+			goto fsck_err;
+		i++;
 	}
 
 	preempt_disable();
@@ -742,7 +821,7 @@ int bch2_accounting_read(struct bch_fs *c)
 	}
 	preempt_enable();
 fsck_err:
-	percpu_up_read(&c->mark_lock);
+	percpu_up_write(&c->mark_lock);
 err:
 	printbuf_exit(&buf);
 	bch2_trans_put(trans);
...
@@ -124,6 +124,11 @@ int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k,
 			 "incorrect value size (%zu < %u)",
 			 bkey_val_u64s(k.k), stripe_val_u64s(s));
 
+	bkey_fsck_err_on(s->csum_granularity_bits >= 64,
+			 c, stripe_csum_granularity_bad,
+			 "invalid csum granularity (%u >= 64)",
+			 s->csum_granularity_bits);
+
 	ret = bch2_bkey_ptrs_validate(c, k, flags);
 fsck_err:
 	return ret;
@@ -145,7 +150,11 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
 		   nr_data,
 		   s.nr_redundant);
 	bch2_prt_csum_type(out, s.csum_type);
-	prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);
+	prt_str(out, " gran ");
+	if (s.csum_granularity_bits < 64)
+		prt_printf(out, "%llu", 1ULL << s.csum_granularity_bits);
+	else
+		prt_printf(out, "(invalid shift %u)", s.csum_granularity_bits);
 
 	if (s.disk_label) {
 		prt_str(out, " label");
@@ -1197,47 +1206,62 @@ void bch2_do_stripe_deletes(struct bch_fs *c)
 /* stripe creation: */
 
 static int ec_stripe_key_update(struct btree_trans *trans,
-				struct bkey_i_stripe *new,
-				bool create)
+				struct bkey_i_stripe *old,
+				struct bkey_i_stripe *new)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	int ret;
+	bool create = !old;
 
-	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
-			       new->k.p, BTREE_ITER_intent);
-	ret = bkey_err(k);
+	struct btree_iter iter;
+	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
					       new->k.p, BTREE_ITER_intent);
+	int ret = bkey_err(k);
 	if (ret)
 		goto err;
 
-	if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
-		bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
-				     create ? "creating" : "updating",
-				     bch2_bkey_types[k.k->type]);
+	if (bch2_fs_inconsistent_on(k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe),
+				    c, "error %s stripe: got existing key type %s",
+				    create ? "creating" : "updating",
+				    bch2_bkey_types[k.k->type])) {
 		ret = -EINVAL;
 		goto err;
 	}
 
 	if (k.k->type == KEY_TYPE_stripe) {
-		const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
-		unsigned i;
+		const struct bch_stripe *v = bkey_s_c_to_stripe(k).v;
 
-		if (old->nr_blocks != new->v.nr_blocks) {
-			bch_err(c, "error updating stripe: nr_blocks does not match");
-			ret = -EINVAL;
-			goto err;
-		}
+		BUG_ON(old->v.nr_blocks != new->v.nr_blocks);
+		BUG_ON(old->v.nr_blocks != v->nr_blocks);
 
-		for (i = 0; i < new->v.nr_blocks; i++) {
-			unsigned v = stripe_blockcount_get(old, i);
+		for (unsigned i = 0; i < new->v.nr_blocks; i++) {
+			unsigned sectors = stripe_blockcount_get(v, i);
 
-			BUG_ON(v &&
-			       (old->ptrs[i].dev != new->v.ptrs[i].dev ||
-				old->ptrs[i].gen != new->v.ptrs[i].gen ||
-				old->ptrs[i].offset != new->v.ptrs[i].offset));
+			if (!bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]) && sectors) {
+				struct printbuf buf = PRINTBUF;
+
+				prt_printf(&buf, "stripe changed nonempty block %u", i);
+				prt_str(&buf, "\nold: ");
+				bch2_bkey_val_to_text(&buf, c, k);
+				prt_str(&buf, "\nnew: ");
+				bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new->k_i));
+				bch2_fs_inconsistent(c, "%s", buf.buf);
+				printbuf_exit(&buf);
+				ret = -EINVAL;
+				goto err;
+			}
+
+			/*
+			 * If the stripe ptr changed underneath us, it must have
+			 * been dev_remove_stripes() -> invalidate_stripe_to_dev()
+			 */
+			if (!bch2_extent_ptr_eq(old->v.ptrs[i], v->ptrs[i])) {
+				BUG_ON(v->ptrs[i].dev != BCH_SB_MEMBER_INVALID);
+
+				if (bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]))
+					new->v.ptrs[i].dev = BCH_SB_MEMBER_INVALID;
+			}
 
-			stripe_blockcount_set(&new->v, i, v);
+			stripe_blockcount_set(&new->v, i, sectors);
 		}
 	}
@@ -1499,8 +1523,10 @@ static void ec_stripe_create(struct ec_stripe_new *s)
 			    BCH_TRANS_COMMIT_no_check_rw|
 			    BCH_TRANS_COMMIT_no_enospc,
 			    ec_stripe_key_update(trans,
-					bkey_i_to_stripe(&s->new_stripe.key),
-					!s->have_existing_stripe));
+					s->have_existing_stripe
+					? bkey_i_to_stripe(&s->existing_stripe.key)
+					: NULL,
+					bkey_i_to_stripe(&s->new_stripe.key)));
 	bch_err_msg(c, ret, "creating stripe key");
 	if (ret) {
 		goto err;
@@ -1876,7 +1902,15 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
 	bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);
 
 	for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
-		__clear_bit(v->ptrs[i].dev, devs.d);
+		/*
+		 * Note: we don't yet repair invalid blocks (failed/removed
+		 * devices) when reusing stripes - we still need a codepath to
+		 * walk backpointers and update all extents that point to that
+		 * block when updating the stripe
+		 */
+		if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID)
+			__clear_bit(v->ptrs[i].dev, devs.d);
+
 		if (i < h->s->nr_data)
 			nr_have_data++;
 		else
...
@@ -268,7 +268,8 @@
 	x(BCH_ERR_nopromote,		nopromote_no_writes)		\
 	x(BCH_ERR_nopromote,		nopromote_enomem)		\
 	x(0,				invalid_snapshot_node)		\
-	x(0,				option_needs_open_fs)
+	x(0,				option_needs_open_fs)		\
+	x(0,				remove_disk_accounting_entry)
 
 enum bch_errcode {
 	BCH_ERR_START		= 2048,
...
@@ -695,6 +695,16 @@ void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
 int bch2_bkey_ptrs_validate(struct bch_fs *, struct bkey_s_c,
 			    enum bch_validate_flags);
 
+static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1,
+				      struct bch_extent_ptr ptr2)
+{
+	return (ptr1.cached	== ptr2.cached &&
+		ptr1.unwritten	== ptr2.unwritten &&
+		ptr1.offset	== ptr2.offset &&
+		ptr1.dev	== ptr2.dev &&
+		ptr1.gen	== ptr2.gen);
+}
+
 void bch2_ptr_swab(struct bkey_s);
 
 const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
...
@@ -369,6 +369,7 @@ static noinline void bch2_dio_write_flush(struct dio_write *dio)
 
 static __always_inline long bch2_dio_write_done(struct dio_write *dio)
 {
+	struct bch_fs *c = dio->op.c;
 	struct kiocb *req = dio->req;
 	struct bch_inode_info *inode = dio->inode;
 	bool sync = dio->sync;
@@ -387,7 +388,7 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio)
 	ret = dio->op.error ?: ((long) dio->written << 9);
 	bio_put(&dio->op.wbio.bio);
 
-	bch2_write_ref_put(dio->op.c, BCH_WRITE_REF_dio_write);
+	bch2_write_ref_put(c, BCH_WRITE_REF_dio_write);
 
 	/* inode->i_dio_count is our ref on inode and thus bch_fs */
 	inode_dio_end(&inode->v);
...
@@ -157,6 +157,20 @@ static bool subvol_inum_eq(subvol_inum a, subvol_inum b)
 	return a.subvol == b.subvol && a.inum == b.inum;
 }
 
+static u32 bch2_vfs_inode_hash_fn(const void *data, u32 len, u32 seed)
+{
+	const subvol_inum *inum = data;
+
+	return jhash(&inum->inum, sizeof(inum->inum), seed);
+}
+
+static u32 bch2_vfs_inode_obj_hash_fn(const void *data, u32 len, u32 seed)
+{
+	const struct bch_inode_info *inode = data;
+
+	return bch2_vfs_inode_hash_fn(&inode->ei_inum, sizeof(inode->ei_inum), seed);
+}
+
 static int bch2_vfs_inode_cmp_fn(struct rhashtable_compare_arg *arg,
 				 const void *obj)
 {
@@ -170,11 +184,91 @@ static const struct rhashtable_params bch2_vfs_inodes_params = {
 	.head_offset		= offsetof(struct bch_inode_info, hash),
 	.key_offset		= offsetof(struct bch_inode_info, ei_inum),
 	.key_len		= sizeof(subvol_inum),
+	.hashfn			= bch2_vfs_inode_hash_fn,
+	.obj_hashfn		= bch2_vfs_inode_obj_hash_fn,
 	.obj_cmpfn		= bch2_vfs_inode_cmp_fn,
 	.automatic_shrinking	= true,
 };
 
-struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
+int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p)
+{
+	struct bch_fs *c = trans->c;
+	struct rhashtable *ht = &c->vfs_inodes_table;
+	subvol_inum inum = (subvol_inum) { .inum = p.offset };
+	DARRAY(u32) subvols;
+	int ret = 0;
+
+	if (!test_bit(BCH_FS_started, &c->flags))
+		return false;
+
+	darray_init(&subvols);
+restart_from_top:
+
+	/*
+	 * Tweaked version of __rhashtable_lookup(); we need to get a list of
+	 * subvolumes in which the given inode number is open.
+	 *
+	 * For this to work, we don't include the subvolume ID in the key that
+	 * we hash - all inodes with the same inode number regardless of
+	 * subvolume will hash to the same slot.
+	 *
+	 * This will be less than ideal if the same file is ever open
+	 * simultaneously in many different snapshots:
+	 */
+	rcu_read_lock();
+	struct rhash_lock_head __rcu *const *bkt;
+	struct rhash_head *he;
+	unsigned int hash;
+	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+	hash = rht_key_hashfn(ht, tbl, &inum, bch2_vfs_inodes_params);
+	bkt = rht_bucket(tbl, hash);
+	do {
+		struct bch_inode_info *inode;
+
+		rht_for_each_entry_rcu_from(inode, he, rht_ptr_rcu(bkt), tbl, hash, hash) {
+			if (inode->ei_inum.inum == inum.inum) {
+				ret = darray_push_gfp(&subvols, inode->ei_inum.subvol,
+						      GFP_NOWAIT|__GFP_NOWARN);
+				if (ret) {
+					rcu_read_unlock();
+					ret = darray_make_room(&subvols, 1);
+					if (ret)
+						goto err;
+					subvols.nr = 0;
+					goto restart_from_top;
+				}
+			}
+		}
+		/* An object might have been moved to a different hash chain,
+		 * while we walk along it - better check and retry.
+		 */
+	} while (he != RHT_NULLS_MARKER(bkt));
+
+	/* Ensure we see any new tables. */
+	smp_rmb();
+
+	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	if (unlikely(tbl))
+		goto restart;
+	rcu_read_unlock();
+
+	darray_for_each(subvols, i) {
+		u32 snap;
+		ret = bch2_subvolume_get_snapshot(trans, *i, &snap);
+		if (ret)
+			goto err;
+
+		ret = bch2_snapshot_is_ancestor(c, snap, p.snapshot);
+		if (ret)
+			break;
+	}
+err:
+	darray_exit(&subvols);
+	return ret;
+}
+
+static struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
 {
 	return rhashtable_lookup_fast(&c->vfs_inodes_table, &inum, bch2_vfs_inodes_params);
 }
@@ -184,7 +278,8 @@ static void __wait_on_freeing_inode(struct bch_fs *c,
 				    subvol_inum inum)
 {
 	wait_queue_head_t *wq;
-	DEFINE_WAIT_BIT(wait, &inode->v.i_state, __I_NEW);
+	struct wait_bit_queue_entry wait;
+
 	wq = inode_bit_waitqueue(&wait, &inode->v, __I_NEW);
 	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode->v.i_lock);
@@ -252,7 +347,8 @@ static struct bch_inode_info *bch2_inode_hash_insert(struct bch_fs *c,
 	set_bit(EI_INODE_HASHED, &inode->ei_flags);
 retry:
-	if (unlikely(rhashtable_lookup_insert_fast(&c->vfs_inodes_table,
-						   &inode->hash,
-						   bch2_vfs_inodes_params))) {
+	if (unlikely(rhashtable_lookup_insert_key(&c->vfs_inodes_table,
+						  &inode->ei_inum,
+						  &inode->hash,
+						  bch2_vfs_inodes_params))) {
 		old = bch2_inode_hash_find(c, trans, inode->ei_inum);
...
@@ -54,8 +54,6 @@ static inline subvol_inum inode_inum(struct bch_inode_info *inode)
 	return inode->ei_inum;
 }
 
-struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *, subvol_inum);
-
 /*
  * Set if we've gotten a btree error for this inode, and thus the vfs inode and
  * btree inode may be inconsistent:
@@ -148,6 +146,8 @@ struct bch_inode_info *
 __bch2_create(struct mnt_idmap *, struct bch_inode_info *,
 	      struct dentry *, umode_t, dev_t, subvol_inum, unsigned);
 
+int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p);
+
 int bch2_fs_quota_transfer(struct bch_fs *,
 			   struct bch_inode_info *,
 			   struct bch_qid,
@@ -198,10 +198,7 @@ int bch2_vfs_init(void);
 
 #define bch2_inode_update_after_write(_trans, _inode, _inode_u, _fields) ({ do {} while (0); })
 
-static inline struct bch_inode_info *__bch2_inode_hash_find(struct bch_fs *c, subvol_inum inum)
-{
-	return NULL;
-}
+static inline int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p) { return 0; }
 
 static inline void bch2_evict_subvolume_inodes(struct bch_fs *c,
 					       snapshot_id_list *s) {}
...
@@ -9,6 +9,7 @@ int bch2_check_dirents(struct bch_fs *);
 int bch2_check_xattrs(struct bch_fs *);
 int bch2_check_root(struct bch_fs *);
 int bch2_check_subvolume_structure(struct bch_fs *);
+int bch2_check_unreachable_inodes(struct bch_fs *);
 int bch2_check_directory_structure(struct bch_fs *);
 int bch2_check_nlinks(struct bch_fs *);
 int bch2_fix_reflink_p(struct bch_fs *);
...
@@ -5,6 +5,7 @@
 #include "bkey.h"
 #include "bkey_methods.h"
 #include "opts.h"
+#include "snapshot.h"
 
 enum bch_validate_flags;
 extern const char * const bch2_inode_opts[];
@@ -17,6 +18,15 @@ int bch2_inode_v3_validate(struct bch_fs *, struct bkey_s_c,
 			   enum bch_validate_flags);
 void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
+int __bch2_inode_has_child_snapshots(struct btree_trans *, struct bpos);
+
+static inline int bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos)
+{
+	return bch2_snapshot_is_leaf(trans->c, pos.snapshot) <= 0
+		? __bch2_inode_has_child_snapshots(trans, pos)
+		: 0;
+}
+
 int bch2_trigger_inode(struct btree_trans *, enum btree_id, unsigned,
 		       struct bkey_s_c, struct bkey_s,
 		       enum btree_iter_update_trigger_flags);
...
@@ -133,7 +133,8 @@ enum inode_opt_id {
 	x(i_size_dirty,		5)	\
 	x(i_sectors_dirty,	6)	\
 	x(unlinked,		7)	\
-	x(backptr_untrusted,	8)
+	x(backptr_untrusted,	8)	\
+	x(has_child_snapshot,	9)
 
 /* bits 20+ reserved for packed fields below: */
...
@@ -603,6 +603,19 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
 {
 	int ret;
 
+	if (closure_wait_event_timeout(&j->async_wait,
+				       (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
+				       (flags & JOURNAL_RES_GET_NONBLOCK),
+				       HZ * 10))
+		return ret;
+
+	struct bch_fs *c = container_of(j, struct bch_fs, journal);
+	struct printbuf buf = PRINTBUF;
+	bch2_journal_debug_to_text(&buf, j);
+	bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
+		buf.buf);
+	printbuf_exit(&buf);
+
 	closure_wait_event(&j->async_wait,
 			   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
 			   (flags & JOURNAL_RES_GET_NONBLOCK));
...
@@ -427,7 +427,9 @@ void bch2_opt_to_text(struct printbuf *out,
 		prt_printf(out, "%lli", v);
 		break;
 	case BCH_OPT_STR:
-		if (flags & OPT_SHOW_FULL_LIST)
+		if (v < opt->min || v >= opt->max - 1)
+			prt_printf(out, "(invalid option %lli)", v);
+		else if (flags & OPT_SHOW_FULL_LIST)
 			prt_string_option(out, opt->choices, v);
 		else
 			prt_str(out, opt->choices[v]);
...
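For context, a sketch of the failure mode this guards against, assuming BCH_OPT_STR choices arrays are NULL-terminated and opt->max counts the sentinel (the option names here are illustrative, not from the patch):

	static const char * const compress_choices[] = {
		"none", "lz4", "gzip", NULL,	/* opt->max == 4 */
	};

	/*
	 * A corrupt or malicious superblock can hand us v == 3 (the NULL
	 * sentinel) or anything larger; prt_str(out, opt->choices[v]) would
	 * then dereference NULL or read past the array - hence the
	 * v >= opt->max - 1 check before indexing.
	 */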
@@ -287,7 +287,8 @@ int bch2_journal_replay(struct bch_fs *c)
 			     BCH_TRANS_COMMIT_no_enospc|
 			     BCH_TRANS_COMMIT_journal_reclaim|
 			     BCH_TRANS_COMMIT_skip_accounting_apply|
-			     BCH_TRANS_COMMIT_no_journal_res,
+			     BCH_TRANS_COMMIT_no_journal_res|
+			     BCH_WATERMARK_reclaim,
 			     bch2_journal_replay_accounting_key(trans, k));
 		if (bch2_fs_fatal_err_on(ret, c, "error replaying accounting; %s", bch2_err_str(ret)))
 			goto err;
...
@@ -46,6 +46,7 @@
 	x(check_dirents,		27, PASS_FSCK)			\
 	x(check_xattrs,			28, PASS_FSCK)			\
 	x(check_root,			29, PASS_ONLINE|PASS_FSCK)	\
+	x(check_unreachable_inodes,	40, PASS_ONLINE|PASS_FSCK)	\
 	x(check_subvolume_structure,	36, PASS_ONLINE|PASS_FSCK)	\
 	x(check_directory_structure,	30, PASS_ONLINE|PASS_FSCK)	\
 	x(check_nlinks,			31, PASS_FSCK)			\
...
@@ -66,7 +66,7 @@ void bch2_replicas_entry_to_text(struct printbuf *out,
 	prt_printf(out, "]");
 }
 
-static int bch2_replicas_entry_validate_locked(struct bch_replicas_entry_v1 *r,
+static int bch2_replicas_entry_sb_validate(struct bch_replicas_entry_v1 *r,
 					   struct bch_sb *sb,
 					   struct printbuf *err)
 {
@@ -98,10 +98,28 @@ int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
 				 struct bch_fs *c,
 				 struct printbuf *err)
 {
-	mutex_lock(&c->sb_lock);
-	int ret = bch2_replicas_entry_validate_locked(r, c->disk_sb.sb, err);
-	mutex_unlock(&c->sb_lock);
-	return ret;
+	if (!r->nr_devs) {
+		prt_printf(err, "no devices in entry ");
+		goto bad;
+	}
+
+	if (r->nr_required > 1 &&
+	    r->nr_required >= r->nr_devs) {
+		prt_printf(err, "bad nr_required in entry ");
+		goto bad;
+	}
+
+	for (unsigned i = 0; i < r->nr_devs; i++)
+		if (r->devs[i] != BCH_SB_MEMBER_INVALID &&
+		    !bch2_dev_exists(c, r->devs[i])) {
+			prt_printf(err, "invalid device %u in entry ", r->devs[i]);
+			goto bad;
+		}
+
+	return 0;
+bad:
+	bch2_replicas_entry_to_text(err, r);
+	return -BCH_ERR_invalid_replicas_entry;
 }
 
 void bch2_cpu_replicas_to_text(struct printbuf *out,
@@ -686,7 +704,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
 		struct bch_replicas_entry_v1 *e =
 			cpu_replicas_entry(cpu_r, i);
 
-		int ret = bch2_replicas_entry_validate_locked(e, sb, err);
+		int ret = bch2_replicas_entry_sb_validate(e, sb, err);
 		if (ret)
 			return ret;
@@ -803,6 +821,11 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
 
 		rcu_read_lock();
 		for (unsigned i = 0; i < e->nr_devs; i++) {
+			if (e->devs[i] == BCH_SB_MEMBER_INVALID) {
+				nr_failed++;
+				continue;
+			}
+
 			nr_online += test_bit(e->devs[i], devs.d);
 
 			struct bch_dev *ca = bch2_dev_rcu_noerror(c, e->devs[i]);
...
@@ -78,7 +78,10 @@
 	  BCH_FSCK_ERR_accounting_mismatch)		\
 	x(rebalance_work_acct_fix,			\
 	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),	\
-	  BCH_FSCK_ERR_accounting_mismatch)
+	  BCH_FSCK_ERR_accounting_mismatch)		\
+	x(inode_has_child_snapshots,			\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_inodes),	\
+	  BCH_FSCK_ERR_inode_has_child_snapshots_wrong)
 
 #define DOWNGRADE_TABLE()				\
 	x(bucket_stripe_sectors,			\
...
@@ -180,6 +180,7 @@ enum bch_fsck_flags {
 	x(reflink_p_to_missing_reflink_v,		166,	0) \
 	x(stripe_pos_bad,				167,	0) \
 	x(stripe_val_size_bad,				168,	0) \
+	x(stripe_csum_granularity_bad,			290,	0) \
 	x(stripe_sector_count_wrong,			169,	0) \
 	x(snapshot_tree_pos_bad,			170,	0) \
 	x(snapshot_tree_to_missing_snapshot,		171,	0) \
@@ -225,11 +226,13 @@ enum bch_fsck_flags {
 	x(inode_multiple_links_but_nlink_0,		207,	FSCK_AUTOFIX) \
 	x(inode_wrong_backpointer,			208,	FSCK_AUTOFIX) \
 	x(inode_wrong_nlink,				209,	FSCK_AUTOFIX) \
+	x(inode_has_child_snapshots_wrong,		287,	0) \
 	x(inode_unreachable,				210,	FSCK_AUTOFIX) \
 	x(deleted_inode_but_clean,			211,	FSCK_AUTOFIX) \
 	x(deleted_inode_missing,			212,	FSCK_AUTOFIX) \
 	x(deleted_inode_is_dir,				213,	FSCK_AUTOFIX) \
 	x(deleted_inode_not_unlinked,			214,	FSCK_AUTOFIX) \
+	x(deleted_inode_has_child_snapshots,		288,	FSCK_AUTOFIX) \
 	x(extent_overlapping,				215,	0) \
 	x(key_in_missing_inode,				216,	0) \
 	x(key_in_wrong_inode_type,			217,	0) \
@@ -289,6 +292,7 @@ enum bch_fsck_flags {
 	x(alloc_key_stripe_sectors_wrong,		271,	FSCK_AUTOFIX) \
 	x(accounting_mismatch,				272,	FSCK_AUTOFIX) \
 	x(accounting_replicas_not_marked,		273,	0) \
+	x(accounting_to_invalid_device,			289,	0) \
 	x(invalid_btree_id,				274,	0) \
 	x(alloc_key_io_time_bad,			275,	0) \
 	x(alloc_key_fragmentation_lru_wrong,		276,	FSCK_AUTOFIX) \
@@ -298,7 +302,7 @@ enum bch_fsck_flags {
 	x(accounting_key_replicas_devs_unsorted,	280,	FSCK_AUTOFIX) \
 	x(accounting_key_version_0,			282,	FSCK_AUTOFIX) \
 	x(logged_op_but_clean,				283,	FSCK_AUTOFIX) \
-	x(MAX,						287,	0)
+	x(MAX,						291,	0)
 
 enum bch_sb_error_id {
 #define x(t, n, ...)	BCH_FSCK_ERR_##t = n,
...
@@ -163,6 +163,11 @@ static int validate_member(struct printbuf *err,
 		return -BCH_ERR_invalid_sb_members;
 	}
 
+	if (m.btree_bitmap_shift >= 64) {
+		prt_printf(err, "device %u: invalid btree_bitmap_shift %u", i, m.btree_bitmap_shift);
+		return -BCH_ERR_invalid_sb_members;
+	}
+
 	return 0;
 }
@@ -247,7 +252,10 @@ static void member_to_text(struct printbuf *out,
 	prt_newline(out);
 
 	prt_printf(out, "Btree allocated bitmap blocksize:\t");
-	prt_units_u64(out, 1ULL << m.btree_bitmap_shift);
+	if (m.btree_bitmap_shift < 64)
+		prt_units_u64(out, 1ULL << m.btree_bitmap_shift);
+	else
+		prt_printf(out, "(invalid shift %u)", m.btree_bitmap_shift);
 	prt_newline(out);
 
 	prt_printf(out, "Btree allocated bitmap:\t");
...
@@ -905,12 +905,30 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id)
 	if (bch2_snapshot_equiv(c, id))
 		return 0;
 
-	/* 0 is an invalid tree ID */
+	/* Do we need to reconstruct the snapshot_tree entry as well? */
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	int ret = 0;
 	u32 tree_id = 0;
-	int ret = bch2_snapshot_tree_create(trans, id, 0, &tree_id);
+
+	for_each_btree_key_norestart(trans, iter, BTREE_ID_snapshot_trees, POS_MIN,
+				     0, k, ret) {
+		if (le32_to_cpu(bkey_s_c_to_snapshot_tree(k).v->root_snapshot) == id) {
+			tree_id = k.k->p.offset;
+			break;
+		}
+	}
+	bch2_trans_iter_exit(trans, &iter);
+
 	if (ret)
 		return ret;
 
+	if (!tree_id) {
+		ret = bch2_snapshot_tree_create(trans, id, 0, &tree_id);
+		if (ret)
+			return ret;
+	}
+
 	struct bkey_i_snapshot *snapshot = bch2_trans_kmalloc(trans, sizeof(*snapshot));
 	ret = PTR_ERR_OR_ZERO(snapshot);
 	if (ret)
@@ -921,6 +939,16 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id)
 	snapshot->v.tree	= cpu_to_le32(tree_id);
 	snapshot->v.btime.lo	= cpu_to_le64(bch2_current_time(c));
 
+	for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
+				     0, k, ret) {
+		if (le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot) == id) {
+			snapshot->v.subvol = cpu_to_le32(k.k->p.offset);
+			SET_BCH_SNAPSHOT_SUBVOL(&snapshot->v, true);
+			break;
+		}
+	}
+	bch2_trans_iter_exit(trans, &iter);
+
 	return  bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0) ?:
 		bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
 				   bkey_s_c_null, bkey_i_to_s(&snapshot->k_i), 0) ?:
@@ -1732,103 +1760,6 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
 	return ret;
 }
 
-static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
-{
-	const struct snapshot_t *s = snapshot_t(c, id);
-
-	return s->children[1] ?: s->children[0];
-}
-
-static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
-{
-	u32 child;
-
-	while ((child = bch2_snapshot_smallest_child(c, id)))
-		id = child;
-	return id;
-}
-
-static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
-					       enum btree_id btree,
-					       struct bkey_s_c interior_k,
-					       u32 leaf_id, struct bpos *new_min_pos)
-{
-	struct btree_iter iter;
-	struct bpos pos = interior_k.k->p;
-	struct bkey_s_c k;
-	struct bkey_i *new;
-	int ret;
-
-	pos.snapshot = leaf_id;
-
-	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto out;
-
-	/* key already overwritten in this snapshot? */
-	if (k.k->p.snapshot != interior_k.k->p.snapshot)
-		goto out;
-
-	if (bpos_eq(*new_min_pos, POS_MIN)) {
-		*new_min_pos = k.k->p;
-		new_min_pos->snapshot = leaf_id;
-	}
-
-	new = bch2_bkey_make_mut_noupdate(trans, interior_k);
-	ret = PTR_ERR_OR_ZERO(new);
-	if (ret)
-		goto out;
-
-	new->k.p.snapshot = leaf_id;
-	ret = bch2_trans_update(trans, &iter, new, 0);
-out:
-	bch2_set_btree_iter_dontneed(&iter);
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
-}
-
-int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
-					  enum btree_id btree,
-					  struct bkey_s_c k,
-					  struct bpos *new_min_pos)
-{
-	struct bch_fs *c = trans->c;
-	struct bkey_buf sk;
-	u32 restart_count = trans->restart_count;
-	int ret = 0;
-
-	bch2_bkey_buf_init(&sk);
-	bch2_bkey_buf_reassemble(&sk, c, k);
-	k = bkey_i_to_s_c(sk.k);
-
-	*new_min_pos = POS_MIN;
-
-	for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
-	     id < k.k->p.snapshot;
-	     id++) {
-		if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
-		    !bch2_snapshot_is_leaf(c, id))
-			continue;
-again:
-		ret =   btree_trans_too_many_iters(trans) ?:
-			bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
-			bch2_trans_commit(trans, NULL, NULL, 0);
-		if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
-			bch2_trans_begin(trans);
-			goto again;
-		}
-
-		if (ret)
-			break;
-	}
-
-	bch2_bkey_buf_exit(&sk, c);
-	return ret ?: trans_was_restarted(trans, restart_count);
-}
-
 static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
 {
 	struct bch_fs *c = trans->c;
...
@@ -259,9 +259,6 @@ static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
 	return __bch2_key_has_snapshot_overwrites(trans, id, pos);
 }
 
-int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *, enum btree_id,
-					  struct bkey_s_c, struct bpos *);
-
 int bch2_snapshots_read(struct bch_fs *);
 void bch2_fs_snapshots_exit(struct bch_fs *);
...
@@ -184,6 +184,7 @@ static DEFINE_MUTEX(bch_fs_list_lock);
 
 DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);
 
+static void bch2_dev_unlink(struct bch_dev *);
 static void bch2_dev_free(struct bch_dev *);
 static int bch2_dev_alloc(struct bch_fs *, unsigned);
 static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
@@ -620,9 +621,7 @@ void __bch2_fs_stop(struct bch_fs *c)
 	up_write(&c->state_lock);
 
 	for_each_member_device(c, ca)
-		if (ca->kobj.state_in_sysfs &&
-		    ca->disk_sb.bdev)
-			sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
+		bch2_dev_unlink(ca);
 
 	if (c->kobj.state_in_sysfs)
 		kobject_del(&c->kobj);
@@ -1187,9 +1186,7 @@ static void bch2_dev_free(struct bch_dev *ca)
 {
 	cancel_work_sync(&ca->io_error_work);
 
-	if (ca->kobj.state_in_sysfs &&
-	    ca->disk_sb.bdev)
-		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
+	bch2_dev_unlink(ca);
 
 	if (ca->kobj.state_in_sysfs)
 		kobject_del(&ca->kobj);
@@ -1226,10 +1223,7 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
 	percpu_ref_kill(&ca->io_ref);
 	wait_for_completion(&ca->io_ref_completion);
 
-	if (ca->kobj.state_in_sysfs) {
-		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
-		sysfs_remove_link(&ca->kobj, "block");
-	}
+	bch2_dev_unlink(ca);
 
 	bch2_free_super(&ca->disk_sb);
 	bch2_dev_journal_exit(ca);
@@ -1251,6 +1245,26 @@ static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
 	complete(&ca->io_ref_completion);
 }
 
+static void bch2_dev_unlink(struct bch_dev *ca)
+{
+	struct kobject *b;
+
+	/*
+	 * This is racy w.r.t. the underlying block device being hot-removed,
+	 * which removes it from sysfs.
+	 *
+	 * It'd be lovely if we had a way to handle this race, but the sysfs
+	 * code doesn't appear to provide a good method and block/holder.c is
+	 * susceptible as well:
+	 */
+	if (ca->kobj.state_in_sysfs &&
+	    ca->disk_sb.bdev &&
+	    (b = bdev_kobj(ca->disk_sb.bdev))->state_in_sysfs) {
+		sysfs_remove_link(b, "bcachefs");
+		sysfs_remove_link(&ca->kobj, "block");
+	}
+}
+
 static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
 {
 	int ret;
...
@@ -454,4 +454,39 @@ do {									\
 	__closure_wait_event(waitlist, _cond);				\
 } while (0)
 
+#define __closure_wait_event_timeout(waitlist, _cond, _until)		\
+({									\
+	struct closure cl;						\
+	long _t;							\
+									\
+	closure_init_stack(&cl);					\
+									\
+	while (1) {							\
+		closure_wait(waitlist, &cl);				\
+		if (_cond) {						\
+			_t = max_t(long, 1L, _until - jiffies);		\
+			break;						\
+		}							\
+		_t = max_t(long, 0L, _until - jiffies);			\
+		if (!_t)						\
+			break;						\
+		closure_sync_timeout(&cl, _t);				\
+	}								\
+	closure_wake_up(waitlist);					\
+	closure_sync(&cl);						\
+	_t;								\
+})
+
+/*
+ * Returns 0 if timeout expired, remaining time in jiffies (at least 1) if
+ * condition became true
+ */
+#define closure_wait_event_timeout(waitlist, _cond, _timeout)		\
+({									\
+	unsigned long _until = jiffies + _timeout;			\
+	(_cond)								\
+		? max_t(long, 1L, _until - jiffies)			\
+		: __closure_wait_event_timeout(waitlist, _cond, _until);\
+})
+
 #endif	/* _LINUX_CLOSURE_H */
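For reference, a minimal usage sketch of the new macro outside the journal code above (the waitlist and flag here are illustrative, not from this series):

	static struct closure_waitlist list;
	static bool done;

	/* waiter: give up after five seconds */
	long t = closure_wait_event_timeout(&list, READ_ONCE(done), 5 * HZ);
	if (!t)
		pr_warn("timed out waiting for done\n");

	/* waker, elsewhere: */
	WRITE_ONCE(done, true);
	closure_wake_up(&list);

Per the comment in the header, the return value is 0 on timeout and the remaining time in jiffies (at least 1) if the condition became true, matching wait_event_timeout()-style semantics.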