Commit 94f651e2 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Return errors from for_each_btree_key()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 201a4d4c
......@@ -273,14 +273,14 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
bch2_alloc_read_key(c, k);
bch2_trans_cond_resched(&trans);
}
ret = bch2_trans_exit(&trans);
if (ret)
ret = bch2_trans_exit(&trans) ?: ret;
if (ret) {
bch_err(c, "error reading alloc info: %i", ret);
return ret;
}
for_each_journal_key(*journal_keys, j)
if (j->btree_id == BTREE_ID_ALLOC)
......
......@@ -289,7 +289,7 @@ static int mark_journal_key(struct bch_fs *c, enum btree_id id,
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, id, bkey_start_pos(&insert->k),
BTREE_ITER_SLOTS, k) {
BTREE_ITER_SLOTS, k, ret) {
percpu_down_read(&c->mark_lock);
ret = bch2_mark_overwrite(&trans, iter, k, insert, NULL,
BCH_BUCKET_MARK_GC|
......@@ -300,7 +300,7 @@ static int mark_journal_key(struct bch_fs *c, enum btree_id id,
break;
}
return bch2_trans_exit(&trans);
return bch2_trans_exit(&trans) ?: ret;
}
static int bch2_gc_btrees(struct bch_fs *c, struct journal_keys *journal_keys,
......
......@@ -238,12 +238,16 @@ static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
: bch2_btree_iter_next(iter);
}
#define for_each_btree_key(_trans, _iter, _btree_id, _start, _flags, _k)\
for (iter = bch2_trans_get_iter((_trans), (_btree_id), \
(_start), (_flags)), \
(_k) = __bch2_btree_iter_peek(_iter, _flags); \
!IS_ERR_OR_NULL((_k).k); \
(_k) = __bch2_btree_iter_next(_iter, _flags))
/*
 * Iterate keys in [_start, ...) of btree (_btree_id), reporting errors.
 *
 * (_iter) is obtained from (_trans); (_k) holds the current key; (_ret)
 * receives any iterator-allocation or btree-read error.  Iteration stops
 * when (_ret) is nonzero or there are no more keys ((_k).k == NULL), so
 * callers must check (_ret) after the loop.
 *
 * BUG FIX: the loop condition previously tested the bare identifier
 * `ret` instead of the macro parameter (_ret) — an unhygienic capture
 * that bound to whatever `ret` happened to exist at the expansion site
 * (and miscompiled callers passing a differently-named variable, e.g.
 * `err` in bch2_check_range_allocated).  All uses of the parameter are
 * now consistently spelled (_ret), and (_iter) is used throughout
 * instead of the bare `_iter`/`iter` mix.
 */
#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for ((_ret) = PTR_ERR_OR_ZERO((_iter) =				\
			bch2_trans_get_iter((_trans), (_btree_id),	\
					    (_start), (_flags))) ?:	\
		      PTR_ERR_OR_ZERO(((_k) =				\
			__bch2_btree_iter_peek((_iter), (_flags))).k);	\
	     !(_ret) && (_k).k;						\
	     (_ret) = PTR_ERR_OR_ZERO(((_k) =				\
			__bch2_btree_iter_next((_iter), (_flags))).k))
#define for_each_btree_key_continue(_iter, _flags, _k) \
for ((_k) = __bch2_btree_iter_peek(_iter, _flags); \
......
......@@ -1035,12 +1035,12 @@ int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
return ret;
}
inline bool bch2_mark_overwrite(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c old,
struct bkey_i *new,
struct bch_fs_usage *fs_usage,
unsigned flags)
inline int bch2_mark_overwrite(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c old,
struct bkey_i *new,
struct bch_fs_usage *fs_usage,
unsigned flags)
{
struct bch_fs *c = trans->c;
struct btree *b = iter->l[0].b;
......@@ -1049,7 +1049,7 @@ inline bool bch2_mark_overwrite(struct btree_trans *trans,
if (btree_node_is_extents(b)
? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
: bkey_cmp(new->k.p, old.k->p))
return false;
return 0;
if (btree_node_is_extents(b)) {
switch (bch2_extent_overlap(&new->k, old.k)) {
......@@ -1080,24 +1080,24 @@ inline bool bch2_mark_overwrite(struct btree_trans *trans,
BUG_ON(sectors >= 0);
}
bch2_mark_key_locked(c, old, false, sectors,
fs_usage, trans->journal_res.seq, flags);
return true;
return bch2_mark_key_locked(c, old, false, sectors, fs_usage,
trans->journal_res.seq, flags) ?: 1;
}
void bch2_mark_update(struct btree_trans *trans,
struct btree_insert_entry *insert,
struct bch_fs_usage *fs_usage,
unsigned flags)
int bch2_mark_update(struct btree_trans *trans,
struct btree_insert_entry *insert,
struct bch_fs_usage *fs_usage,
unsigned flags)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct bkey_packed *_k;
int ret = 0;
if (!btree_node_type_needs_gc(iter->btree_id))
return;
return 0;
bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
bpos_min(insert->k->k.p, b->key.k.p).offset -
......@@ -1105,7 +1105,7 @@ void bch2_mark_update(struct btree_trans *trans,
fs_usage, trans->journal_res.seq, flags);
if (unlikely(trans->flags & BTREE_INSERT_NOMARK_OVERWRITES))
return;
return 0;
/*
* For non extents, we only mark the new key, not the key being
......@@ -1114,19 +1114,22 @@ void bch2_mark_update(struct btree_trans *trans,
if ((iter->btree_id == BTREE_ID_ALLOC ||
iter->btree_id == BTREE_ID_EC) &&
!bkey_deleted(&insert->k->k))
return;
return 0;
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
KEY_TYPE_discard))) {
struct bkey unpacked;
struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
if (!bch2_mark_overwrite(trans, iter, k, insert->k,
fs_usage, flags))
ret = bch2_mark_overwrite(trans, iter, k, insert->k,
fs_usage, flags);
if (ret <= 0)
break;
bch2_btree_node_iter_advance(&node_iter, b);
}
return ret;
}
void bch2_trans_fs_usage_apply(struct btree_trans *trans,
......
......@@ -254,11 +254,11 @@ int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *);
bool bch2_mark_overwrite(struct btree_trans *, struct btree_iter *,
struct bkey_s_c, struct bkey_i *,
struct bch_fs_usage *, unsigned);
void bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
struct bch_fs_usage *, unsigned);
int bch2_mark_overwrite(struct btree_trans *, struct btree_iter *,
struct bkey_s_c, struct bkey_i *,
struct bch_fs_usage *, unsigned);
int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
struct bch_fs_usage *, unsigned);
void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage *);
/* disk reservations: */
......
......@@ -333,14 +333,10 @@ int bch2_empty_dir_trans(struct btree_trans *trans, u64 dir_inum)
{
struct btree_iter *iter;
struct bkey_s_c k;
int ret = 0;
iter = bch2_trans_get_iter(trans, BTREE_ID_DIRENTS,
POS(dir_inum, 0), 0);
if (IS_ERR(iter))
return PTR_ERR(iter);
int ret;
for_each_btree_key_continue(iter, 0, k) {
for_each_btree_key(trans, iter, BTREE_ID_DIRENTS,
POS(dir_inum, 0), 0, k, ret) {
if (k.k->p.inode > dir_inum)
break;
......@@ -369,6 +365,7 @@ int bch2_readdir(struct bch_fs *c, struct file *file,
struct bkey_s_c k;
struct bkey_s_c_dirent dirent;
unsigned len;
int ret;
if (!dir_emit_dots(file, ctx))
return 0;
......@@ -376,7 +373,7 @@ int bch2_readdir(struct bch_fs *c, struct file *file,
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
POS(inode->v.i_ino, ctx->pos), 0, k) {
POS(inode->v.i_ino, ctx->pos), 0, k, ret) {
if (k.k->type != KEY_TYPE_dirent)
continue;
......@@ -401,7 +398,7 @@ int bch2_readdir(struct bch_fs *c, struct file *file,
ctx->pos = k.k->p.offset + 1;
}
bch2_trans_exit(&trans);
ret = bch2_trans_exit(&trans) ?: ret;
return 0;
return ret;
}
......@@ -679,10 +679,8 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
bch2_trans_begin(&trans);
/* XXX: start pos hint */
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0)
break;
......@@ -690,7 +688,8 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
goto found_slot;
}
ret = -ENOSPC;
if (!ret)
ret = -ENOSPC;
goto out;
found_slot:
ret = ec_stripe_mem_alloc(c, iter);
......@@ -1249,14 +1248,14 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k, ret)
bch2_stripe_read_key(c, k);
bch2_trans_cond_resched(&trans);
}
ret = bch2_trans_exit(&trans);
if (ret)
ret = bch2_trans_exit(&trans) ?: ret;
if (ret) {
bch_err(c, "error reading stripes: %i", ret);
return ret;
}
for_each_journal_key(*journal_keys, i)
if (i->btree_id == BTREE_ID_EC)
......
......@@ -1632,13 +1632,14 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
struct bpos end = pos;
struct bkey_s_c k;
bool ret = true;
int err;
end.offset += size;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
BTREE_ITER_SLOTS, k) {
BTREE_ITER_SLOTS, k, err) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
......
......@@ -2139,7 +2139,7 @@ static inline int range_has_data(struct bch_fs *c,
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
......@@ -2732,7 +2732,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9), 0, k) {
POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
break;
} else if (bkey_extent_is_data(k.k)) {
......@@ -2742,7 +2742,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
break;
}
ret = bch2_trans_exit(&trans);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
......@@ -2806,7 +2806,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9),
BTREE_ITER_SLOTS, k) {
BTREE_ITER_SLOTS, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
next_hole = bch2_next_pagecache_hole(&inode->v,
offset, MAX_LFS_FILESIZE);
......@@ -2823,7 +2823,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
}
}
ret = bch2_trans_exit(&trans);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
......
......@@ -1210,7 +1210,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(ei->v.i_ino, start >> 9), 0, k)
POS(ei->v.i_ino, start >> 9), 0, k, ret)
if (bkey_extent_is_data(k.k) ||
k.k->type == KEY_TYPE_reservation) {
if (bkey_cmp(bkey_start_pos(k.k),
......@@ -1220,17 +1220,17 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (have_extent) {
ret = bch2_fill_extent(info, &tmp.k, 0);
if (ret)
goto out;
break;
}
bkey_reassemble(&tmp.k, k);
have_extent = true;
}
if (have_extent)
if (!ret && have_extent)
ret = bch2_fill_extent(info, &tmp.k, FIEMAP_EXTENT_LAST);
out:
bch2_trans_exit(&trans);
ret = bch2_trans_exit(&trans) ?: ret;
return ret < 0 ? ret : 0;
}
......
......@@ -21,8 +21,10 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum)
struct btree_iter *iter;
struct bkey_s_c k;
u64 sectors = 0;
int ret;
for_each_btree_key(trans, iter, BTREE_ID_EXTENTS, POS(inum, 0), 0, k) {
for_each_btree_key(trans, iter, BTREE_ID_EXTENTS,
POS(inum, 0), 0, k, ret) {
if (k.k->p.inode != inum)
break;
......@@ -30,7 +32,9 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum)
sectors += k.k->size;
}
return bch2_trans_iter_free(trans, iter) ?: sectors;
bch2_trans_iter_free(trans, iter);
return ret ?: sectors;
}
static int remove_dirent(struct btree_trans *trans,
......@@ -942,7 +946,7 @@ static int check_directory_structure(struct bch_fs *c,
goto up;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
POS(e->inum, e->offset + 1), 0, k) {
POS(e->inum, e->offset + 1), 0, k, ret) {
if (k.k->p.inode != e->inum)
break;
......@@ -985,7 +989,7 @@ static int check_directory_structure(struct bch_fs *c,
}
goto next;
}
ret = bch2_trans_iter_free(&trans, iter);
ret = bch2_trans_iter_free(&trans, iter) ?: ret;
if (ret) {
bch_err(c, "btree error %i in fsck", ret);
goto err;
......@@ -1087,7 +1091,7 @@ static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links,
inc_link(c, links, range_start, range_end, BCACHEFS_ROOT_INO, false);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret) {
switch (k.k->type) {
case KEY_TYPE_dirent:
d = bkey_s_c_to_dirent(k);
......@@ -1105,7 +1109,7 @@ static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links,
bch2_trans_cond_resched(&trans);
}
ret = bch2_trans_exit(&trans);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
bch_err(c, "error in fs gc: btree error %i while walking dirents", ret);
......@@ -1432,15 +1436,12 @@ static int check_inodes_fast(struct bch_fs *c)
struct btree_iter *iter;
struct bkey_s_c k;
struct bkey_s_c_inode inode;
int ret = 0, ret2;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
POS_MIN, 0);
for_each_btree_key_continue(iter, 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_inode)
continue;
......@@ -1456,10 +1457,9 @@ static int check_inodes_fast(struct bch_fs *c)
break;
}
}
BUG_ON(ret == -EINTR);
ret2 = bch2_trans_exit(&trans);
return ret ?: ret2;
return bch2_trans_exit(&trans) ?: ret;
}
/*
......
......@@ -1326,7 +1326,7 @@ static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
retry:
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode, bvec_iter.bi_sector),
BTREE_ITER_SLOTS, k) {
BTREE_ITER_SLOTS, k, ret) {
BKEY_PADDED(k) tmp;
unsigned bytes;
......@@ -1357,8 +1357,8 @@ static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
* If we get here, it better have been because there was an error
* reading a btree node
*/
BUG_ON(!btree_iter_err(iter));
__bcache_io_error(c, "btree IO error");
BUG_ON(!ret);
__bcache_io_error(c, "btree IO error: %i", ret);
err:
rbio->bio.bi_status = BLK_STS_IOERR;
out:
......@@ -1871,6 +1871,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
unsigned flags = BCH_READ_RETRY_IF_STALE|
BCH_READ_MAY_PROMOTE|
BCH_READ_USER_MAPPED;
int ret;
bch2_trans_init(&trans, c);
......@@ -1883,7 +1884,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode, rbio->bio.bi_iter.bi_sector),
BTREE_ITER_SLOTS, k) {
BTREE_ITER_SLOTS, k, ret) {
BKEY_PADDED(k) tmp;
unsigned bytes;
......@@ -1915,8 +1916,8 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
* If we get here, it better have been because there was an error
* reading a btree node
*/
BUG_ON(!btree_iter_err(iter));
bcache_io_error(c, &rbio->bio, "btree IO error");
BUG_ON(!ret);
bcache_io_error(c, &rbio->bio, "btree IO error: %i", ret);
bch2_trans_exit(&trans);
bch2_rbio_done(rbio);
......
......@@ -630,7 +630,7 @@ static int bch2_gc_data_replicas(struct bch_fs *c)
bch2_replicas_gc_start(c, (1 << BCH_DATA_USER)|(1 << BCH_DATA_CACHED));
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
BTREE_ITER_PREFETCH, k) {
BTREE_ITER_PREFETCH, k, ret) {
ret = bch2_mark_bkey_replicas(c, k);
if (ret)
break;
......
......@@ -364,7 +364,7 @@ static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type)
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_QUOTAS, POS(type, 0),
BTREE_ITER_PREFETCH, k) {
BTREE_ITER_PREFETCH, k, ret) {
if (k.k->p.inode != type)
break;
......@@ -436,7 +436,7 @@ int bch2_fs_quota_read(struct bch_fs *c)
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN,
BTREE_ITER_PREFETCH, k) {
BTREE_ITER_PREFETCH, k, ret) {
switch (k.k->type) {
case KEY_TYPE_inode:
ret = bch2_inode_unpack(bkey_s_c_to_inode(k), &u);
......
......@@ -134,14 +134,11 @@ bch2_hash_lookup(struct btree_trans *trans,
{
struct btree_iter *iter;
struct bkey_s_c k;
int ret;
iter = bch2_trans_get_iter(trans, desc.btree_id,
POS(inode, desc.hash_key(info, key)),
BTREE_ITER_SLOTS|flags);
if (IS_ERR(iter))
return iter;
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
for_each_btree_key(trans, iter, desc.btree_id,
POS(inode, desc.hash_key(info, key)),
BTREE_ITER_SLOTS|flags, k, ret) {
if (iter->pos.inode != inode)
break;
......@@ -156,7 +153,7 @@ bch2_hash_lookup(struct btree_trans *trans,
}
}
return IS_ERR(k.k) ? ERR_CAST(k.k) : ERR_PTR(-ENOENT);
return ERR_PTR(ret ?: -ENOENT);
}
static __always_inline struct btree_iter *
......@@ -167,14 +164,11 @@ bch2_hash_hole(struct btree_trans *trans,
{
struct btree_iter *iter;
struct bkey_s_c k;
int ret;
iter = bch2_trans_get_iter(trans, desc.btree_id,
POS(inode, desc.hash_key(info, key)),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
if (IS_ERR(iter))
return iter;
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
for_each_btree_key(trans, iter, desc.btree_id,
POS(inode, desc.hash_key(info, key)),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (iter->pos.inode != inode)
break;
......@@ -182,7 +176,7 @@ bch2_hash_hole(struct btree_trans *trans,
return iter;
}
return IS_ERR(k.k) ? ERR_CAST(k.k) : ERR_PTR(-ENOSPC);
return ERR_PTR(ret ?: -ENOSPC);
}
static __always_inline
......@@ -224,15 +218,11 @@ int bch2_hash_set(struct btree_trans *trans,
struct btree_iter *iter, *slot = NULL;
struct bkey_s_c k;
bool found = false;
int ret = 0;
iter = bch2_trans_get_iter(trans, desc.btree_id,
POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
if (IS_ERR(iter))
return PTR_ERR(iter);
int ret;
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
for_each_btree_key(trans, iter, desc.btree_id,
POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (iter->pos.inode != inode)
break;
......@@ -256,9 +246,10 @@ int bch2_hash_set(struct btree_trans *trans,
}
if (slot)
bch2_trans_iter_free(trans, iter);
bch2_trans_iter_free(trans, slot);
bch2_trans_iter_free(trans, iter);
return bch2_trans_iter_free(trans, iter) ?: -ENOSPC;
return ret ?: -ENOSPC;
found:
found = true;
not_found:
......
......@@ -289,13 +289,14 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
nr_compressed_extents = 0,
compressed_sectors_compressed = 0,
compressed_sectors_uncompressed = 0;
int ret;
if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k)
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
if (k.k->type == KEY_TYPE_extent) {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
......@@ -317,7 +318,10 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
break;
}
}
bch2_trans_exit(&trans);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
return scnprintf(buf, PAGE_SIZE,
"uncompressed data:\n"
......
......@@ -116,7 +116,8 @@ static void test_iterate(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0), 0, k)
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
POS_MIN, 0, k, ret)
BUG_ON(k.k->p.offset != i++);
BUG_ON(i != nr);
......@@ -161,7 +162,8 @@ static void test_iterate_extents(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS_MIN, 0, k, ret) {
BUG_ON(bkey_start_offset(k.k) != i);
i = k.k->p.offset;
}
......@@ -209,7 +211,8 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0), 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
0, k, ret) {
BUG_ON(k.k->p.offset != i);
i += 2;
}
......@@ -221,8 +224,8 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS(0, 0),
BTREE_ITER_SLOTS, k) {
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_SLOTS, k, ret) {
BUG_ON(bkey_deleted(k.k) != (i & 1));
BUG_ON(k.k->p.offset != i++);
......@@ -263,7 +266,8 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0), 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
0, k, ret) {
BUG_ON(bkey_start_offset(k.k) != i + 8);
BUG_ON(k.k->size != 8);
i += 16;
......@@ -276,8 +280,8 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS(0, 0),
BTREE_ITER_SLOTS, k) {
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
BTREE_ITER_SLOTS, k, ret) {
BUG_ON(bkey_deleted(k.k) != !(i % 16));
BUG_ON(bkey_start_offset(k.k) != i);
......@@ -501,10 +505,8 @@ static void seq_insert(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
insert.k.p = iter->pos;
bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
......@@ -522,10 +524,11 @@ static void seq_lookup(struct bch_fs *c, u64 nr)
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
int ret;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k)
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret)
;
bch2_trans_exit(&trans);
}
......@@ -539,10 +542,8 @@ static void seq_overwrite(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_INTENT);
for_each_btree_key_continue(iter, 0, k) {
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_INTENT, k, ret) {
struct bkey_i_cookie u;
bkey_reassemble(&u.k_i, k);
......
......@@ -198,55 +198,54 @@ int bch2_xattr_set(struct btree_trans *trans, u64 inum,
return ret;
}
static void __bch2_xattr_emit(const char *prefix,
const char *name, size_t name_len,
char **buffer, size_t *buffer_size,
ssize_t *ret)
struct xattr_buf {
char *buf;
size_t len;
size_t used;
};
static int __bch2_xattr_emit(const char *prefix,
const char *name, size_t name_len,
struct xattr_buf *buf)
{
const size_t prefix_len = strlen(prefix);
const size_t total_len = prefix_len + name_len + 1;
if (*buffer) {
if (total_len > *buffer_size) {
*ret = -ERANGE;
return;
}
if (buf->buf) {
if (buf->used + total_len > buf->len)
return -ERANGE;
memcpy(*buffer, prefix, prefix_len);
memcpy(*buffer + prefix_len,
memcpy(buf->buf + buf->used, prefix, prefix_len);
memcpy(buf->buf + buf->used + prefix_len,
name, name_len);
(*buffer)[prefix_len + name_len] = '\0';
*buffer += total_len;
*buffer_size -= total_len;
buf->buf[buf->used + prefix_len + name_len] = '\0';
}
*ret += total_len;
buf->used += total_len;
return 0;
}
static void bch2_xattr_emit(struct dentry *dentry,
static int bch2_xattr_emit(struct dentry *dentry,
const struct bch_xattr *xattr,
char **buffer, size_t *buffer_size,
ssize_t *ret)
struct xattr_buf *buf)
{
const struct xattr_handler *handler =
bch2_xattr_type_to_handler(xattr->x_type);
if (handler && (!handler->list || handler->list(dentry)))
__bch2_xattr_emit(handler->prefix ?: handler->name,
xattr->x_name, xattr->x_name_len,
buffer, buffer_size, ret);
return handler && (!handler->list || handler->list(dentry))
? __bch2_xattr_emit(handler->prefix ?: handler->name,
xattr->x_name, xattr->x_name_len, buf)
: 0;
}
static void bch2_xattr_list_bcachefs(struct bch_fs *c,
struct bch_inode_info *inode,
char **buffer,
size_t *buffer_size,
ssize_t *ret,
bool all)
static int bch2_xattr_list_bcachefs(struct bch_fs *c,
struct bch_inode_info *inode,
struct xattr_buf *buf,
bool all)
{
const char *prefix = all ? "bcachefs_effective." : "bcachefs.";
unsigned id;
int ret = 0;
u64 v;
for (id = 0; id < Inode_opt_nr; id++) {
......@@ -258,13 +257,13 @@ static void bch2_xattr_list_bcachefs(struct bch_fs *c,
!(inode->ei_inode.bi_fields_set & (1 << id)))
continue;
__bch2_xattr_emit(prefix,
bch2_inode_opts[id],
strlen(bch2_inode_opts[id]),
buffer, buffer_size, ret);
if (*ret < 0)
ret = __bch2_xattr_emit(prefix, bch2_inode_opts[id],
strlen(bch2_inode_opts[id]), buf);
if (ret)
break;
}
return ret;
}
ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
......@@ -274,13 +273,14 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
struct xattr_buf buf = { .buf = buffer, .len = buffer_size };
u64 inum = dentry->d_inode->i_ino;
ssize_t ret = 0;
int ret;
bch2_trans_init(&trans, c);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
POS(inum, 0), 0, k) {
POS(inum, 0), 0, k, ret) {
BUG_ON(k.k->p.inode < inum);
if (k.k->p.inode > inum)
......@@ -289,27 +289,24 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
if (k.k->type != KEY_TYPE_xattr)
continue;
bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v,
&buffer, &buffer_size, &ret);
if (ret < 0)
ret = bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf);
if (ret)
break;
}
bch2_trans_exit(&trans);
ret = bch2_trans_exit(&trans) ?: ret;
if (ret < 0)
if (ret)
return ret;
bch2_xattr_list_bcachefs(c, inode, &buffer,
&buffer_size, &ret, false);
if (ret < 0)
ret = bch2_xattr_list_bcachefs(c, inode, &buf, false);
if (ret)
return ret;
bch2_xattr_list_bcachefs(c, inode, &buffer,
&buffer_size, &ret, true);
if (ret < 0)
ret = bch2_xattr_list_bcachefs(c, inode, &buf, true);
if (ret)
return ret;
return ret;
return buf.used;
}
static int bch2_xattr_get_handler(const struct xattr_handler *handler,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment