Commit 20bceecb authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: More work to avoid transaction restarts

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 7d825866
......@@ -222,7 +222,7 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
struct bkey_s_c_xattr xattr;
struct posix_acl *acl = NULL;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
......@@ -305,7 +305,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
int ret;
mutex_lock(&inode->ei_update_lock);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
if (type == ACL_TYPE_ACCESS && acl) {
ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
......
......@@ -229,7 +229,7 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
unsigned i;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
bch2_mark_key(c, k, true, 0, NULL, 0,
......@@ -288,7 +288,7 @@ int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
if (k->k.p.offset >= ca->mi.nbuckets)
return 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
BTREE_ITER_INTENT);
......@@ -333,7 +333,7 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags, bool *wrote)
BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
......@@ -1032,7 +1032,7 @@ static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
u64 journal_seq = 0;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
POS(ca->dev_idx, 0),
......
......@@ -732,8 +732,7 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
goto retry;
trans_restart();
trace_trans_restart_btree_node_reused(c,
iter->trans->ip);
trace_trans_restart_btree_node_reused(iter->trans->ip);
return ERR_PTR(-EINTR);
}
}
......
......@@ -217,7 +217,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
u8 max_stale;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
......@@ -286,7 +286,7 @@ static int mark_journal_key(struct bch_fs *c, enum btree_id id,
if (ret)
return ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, id, bkey_start_pos(&insert->k),
BTREE_ITER_SLOTS, k, ret) {
......@@ -1055,7 +1055,7 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
struct btree *merge[GC_MERGE_NODES];
u32 lock_seq[GC_MERGE_NODES];
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
/*
* XXX: We don't have a good way of positively matching on sibling nodes
......
......@@ -1151,7 +1151,7 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
struct btree_iter *iter;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
BTREE_MAX_DEPTH, b->c.level, 0);
......
......@@ -270,8 +270,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
if (unlikely(!ret)) {
trans_restart();
trace_trans_restart_would_deadlock(iter->trans->c,
iter->trans->ip);
trace_trans_restart_would_deadlock(iter->trans->ip);
return false;
}
......@@ -1667,7 +1666,7 @@ int bch2_trans_iter_free_on_commit(struct btree_trans *trans,
return ret;
}
int bch2_trans_realloc_iters(struct btree_trans *trans,
static int bch2_trans_realloc_iters(struct btree_trans *trans,
unsigned new_size)
{
void *new_iters, *new_updates;
......@@ -1715,18 +1714,13 @@ int bch2_trans_realloc_iters(struct btree_trans *trans,
if (trans->iters_live) {
trans_restart();
trace_trans_restart_iters_realloced(trans->c, trans->ip);
trace_trans_restart_iters_realloced(trans->ip, trans->size);
return -EINTR;
}
return 0;
}
void bch2_trans_preload_iters(struct btree_trans *trans)
{
bch2_trans_realloc_iters(trans, BTREE_ITER_MAX);
}
static int btree_trans_iter_alloc(struct btree_trans *trans)
{
unsigned idx = __ffs64(~trans->iters_linked);
......@@ -1866,32 +1860,41 @@ struct btree_iter *bch2_trans_copy_iter(struct btree_trans *trans,
return &trans->iters[idx];
}
void *bch2_trans_kmalloc(struct btree_trans *trans,
size_t size)
static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size)
{
void *ret;
if (trans->mem_top + size > trans->mem_bytes) {
if (size > trans->mem_bytes) {
size_t old_bytes = trans->mem_bytes;
size_t new_bytes = roundup_pow_of_two(trans->mem_top + size);
size_t new_bytes = roundup_pow_of_two(size);
void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
if (!new_mem)
return ERR_PTR(-ENOMEM);
return -ENOMEM;
trans->mem = new_mem;
trans->mem_bytes = new_bytes;
if (old_bytes) {
trans_restart();
trace_trans_restart_mem_realloced(trans->c, trans->ip);
return ERR_PTR(-EINTR);
trace_trans_restart_mem_realloced(trans->ip, new_bytes);
return -EINTR;
}
}
ret = trans->mem + trans->mem_top;
return 0;
}
void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
void *p;
int ret;
ret = bch2_trans_preload_mem(trans, trans->mem_top + size);
if (ret)
return ERR_PTR(ret);
p = trans->mem + trans->mem_top;
trans->mem_top += size;
return ret;
return p;
}
inline void bch2_trans_unlink_iters(struct btree_trans *trans, u64 iters)
......@@ -1938,7 +1941,9 @@ void __bch2_trans_begin(struct btree_trans *trans)
bch2_btree_iter_traverse_all(trans);
}
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c)
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
unsigned expected_nr_iters,
size_t expected_mem_bytes)
{
memset(trans, 0, offsetof(struct btree_trans, iters_onstack));
......@@ -1947,6 +1952,12 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c)
trans->size = ARRAY_SIZE(trans->iters_onstack);
trans->iters = trans->iters_onstack;
trans->updates = trans->updates_onstack;
if (expected_nr_iters > trans->size)
bch2_trans_realloc_iters(trans, expected_nr_iters);
if (expected_mem_bytes)
bch2_trans_preload_mem(trans, expected_mem_bytes);
}
int bch2_trans_exit(struct btree_trans *trans)
......
......@@ -258,9 +258,6 @@ static inline int bkey_err(struct bkey_s_c k)
/* new multiple iterator interface: */
int bch2_trans_realloc_iters(struct btree_trans *, unsigned);
void bch2_trans_preload_iters(struct btree_trans *);
int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);
int bch2_trans_iter_free_on_commit(struct btree_trans *, struct btree_iter *);
......@@ -303,7 +300,7 @@ static inline void bch2_trans_begin_updates(struct btree_trans *trans)
}
void *bch2_trans_kmalloc(struct btree_trans *, size_t);
void bch2_trans_init(struct btree_trans *, struct bch_fs *);
void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
int bch2_trans_exit(struct btree_trans *);
#ifdef TRACE_TRANSACTION_RESTARTS
......
......@@ -128,7 +128,7 @@ struct btree_insert_entry *bch2_trans_update(struct btree_trans *,
struct btree_trans trans; \
int _ret; \
\
bch2_trans_init(&trans, (_c)); \
bch2_trans_init(&trans, (_c), 0, 0); \
\
do { \
bch2_trans_begin(&trans); \
......
......@@ -1586,7 +1586,7 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
* instead of locking/reserving all the way to the root:
*/
if (!bch2_btree_iter_upgrade(iter, U8_MAX)) {
trace_trans_restart_iter_upgrade(c, iter->trans->ip);
trace_trans_restart_iter_upgrade(trans->ip);
ret = -EINTR;
goto out;
}
......
......@@ -440,7 +440,7 @@ static int bch2_trans_journal_preres_get(struct btree_trans *trans)
if (!bch2_trans_relock(trans)) {
trans_restart(" (iter relock after journal preres get blocked)");
trace_trans_restart_journal_preres_get(c, trans->ip);
trace_trans_restart_journal_preres_get(trans->ip);
return -EINTR;
}
......@@ -560,7 +560,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
ret = bch2_trans_mark_update(trans, i,
&trans->fs_usage_deltas);
if (ret == -EINTR)
trace_trans_restart_mark(c, trans->ip);
trace_trans_restart_mark(trans->ip);
if (ret)
return ret;
}
......@@ -570,7 +570,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
if (race_fault()) {
ret = -EINTR;
trans_restart(" (race)");
trace_trans_restart_fault_inject(c, trans->ip);
trace_trans_restart_fault_inject(trans->ip);
goto out;
}
......@@ -719,7 +719,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
ret == -EINTR ||
(flags & BTREE_INSERT_NOUNLOCK)) {
trans_restart(" (split)");
trace_trans_restart_btree_node_split(c, trans->ip);
trace_trans_restart_btree_node_split(trans->ip);
ret = -EINTR;
}
break;
......@@ -739,7 +739,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
return 0;
trans_restart(" (iter relock after marking replicas)");
trace_trans_restart_mark_replicas(c, trans->ip);
trace_trans_restart_mark_replicas(trans->ip);
ret = -EINTR;
break;
case BTREE_INSERT_NEED_JOURNAL_RES:
......@@ -753,7 +753,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
return 0;
trans_restart(" (iter relock after journal res get blocked)");
trace_trans_restart_journal_res_get(c, trans->ip);
trace_trans_restart_journal_res_get(trans->ip);
ret = -EINTR;
break;
default:
......@@ -766,7 +766,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
if (ret2) {
trans_restart(" (traverse)");
trace_trans_restart_traverse(c, trans->ip);
trace_trans_restart_traverse(trans->ip);
return ret2;
}
......@@ -778,7 +778,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
return 0;
trans_restart(" (atomic)");
trace_trans_restart_atomic(c, trans->ip);
trace_trans_restart_atomic(trans->ip);
}
return ret;
......@@ -809,7 +809,7 @@ static int __bch2_trans_commit(struct btree_trans *trans,
if (!bch2_btree_iter_upgrade(i->iter, 1)) {
trans_restart(" (failed upgrade, locks_want %u uptodate %u)",
old_locks_want, old_uptodate);
trace_trans_restart_upgrade(c, trans->ip);
trace_trans_restart_upgrade(trans->ip);
ret = -EINTR;
goto err;
}
......@@ -975,7 +975,9 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
struct btree_iter *iter;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
BTREE_ITER_INTENT);
......@@ -983,6 +985,8 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
if (ret == -EINTR)
goto retry;
bch2_trans_exit(&trans);
return ret;
......@@ -1071,8 +1075,11 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
struct btree_iter *iter;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
/*
* XXX: whether we need mem/more iters depends on whether this btree id
* has triggers
*/
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
iter = bch2_trans_get_iter(&trans, id, start, BTREE_ITER_INTENT);
......
......@@ -221,7 +221,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
if (!i->size)
return i->ret;
bch2_trans_init(&trans, i->c);
bch2_trans_init(&trans, i->c, 0, 0);
iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH);
k = bch2_btree_iter_peek(iter);
......@@ -275,7 +275,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
if (!i->size || !bkey_cmp(POS_MAX, i->from))
return i->ret;
bch2_trans_init(&trans, i->c);
bch2_trans_init(&trans, i->c, 0, 0);
for_each_btree_node(&trans, iter, i->id, i->from, 0, b) {
bch2_btree_node_to_text(&PBUF(i->buf), i->c, b);
......@@ -328,7 +328,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
if (!i->size)
return i->ret;
bch2_trans_init(&trans, i->c);
bch2_trans_init(&trans, i->c, 0, 0);
iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH);
......
......@@ -313,7 +313,7 @@ u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
struct bkey_s_c k;
u64 inum = 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_hash_lookup(&trans, bch2_dirent_hash_desc,
hash_info, dir_inum, name, 0);
......@@ -370,7 +370,7 @@ int bch2_readdir(struct bch_fs *c, struct file *file,
if (!dir_emit_dots(file, ctx))
return 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
POS(inode->v.i_ino, ctx->pos), 0, k, ret) {
......
......@@ -441,7 +441,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
if (!buf)
return -ENOMEM;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC,
POS(0, stripe_idx),
......@@ -698,7 +698,7 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
struct bkey_s_c k;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
......@@ -765,8 +765,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
BKEY_PADDED(k) tmp;
int ret = 0, dev, idx;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
bkey_start_pos(pos),
......@@ -1236,7 +1235,7 @@ int bch2_stripes_write(struct bch_fs *c, unsigned flags, bool *wrote)
new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
BUG_ON(!new_key);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
......@@ -1272,7 +1271,7 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
if (ret)
return ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EC, POS_MIN, 0, k, ret)
bch2_mark_key(c, k, true, 0, NULL, 0, 0);
......@@ -1299,7 +1298,7 @@ int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
size_t i, idx = 0;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0);
......
......@@ -1712,7 +1712,7 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
end.offset += size;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
BTREE_ITER_SLOTS, k, err) {
......
......@@ -435,8 +435,7 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
BUG_ON(k->k.p.inode != inode->v.i_ino);
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
iter = bch2_trans_get_iter(&trans,
BTREE_ID_EXTENTS,
......@@ -1004,7 +1003,7 @@ void bch2_readahead(struct readahead_control *ractl)
ret = readpages_iter_init(&readpages_iter, ractl);
BUG_ON(ret);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
BTREE_ITER_SLOTS);
......@@ -1049,7 +1048,7 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
bio_add_page_contig(&rbio->bio, page);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
BTREE_ITER_SLOTS);
......@@ -2090,8 +2089,7 @@ static int __bch2_fpunch(struct bch_fs *c, struct bch_inode_info *inode,
struct bkey_s_c k;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, start,
BTREE_ITER_INTENT);
......@@ -2137,7 +2135,7 @@ static inline int range_has_data(struct bch_fs *c,
struct bkey_s_c k;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
......@@ -2394,8 +2392,7 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
if ((offset | len) & (block_bytes(c) - 1))
return -EINVAL;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
/*
* We need i_mutex to keep the page cache consistent with the extents
......@@ -2510,8 +2507,7 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
unsigned replicas = io_opts(c, inode).data_replicas;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
inode_lock(&inode->v);
inode_dio_wait(&inode->v);
......@@ -2729,7 +2725,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
if (offset >= isize)
return -ENXIO;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
......@@ -2802,7 +2798,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
if (offset >= isize)
return -ENXIO;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode->v.i_ino, offset >> 9),
......
......@@ -215,7 +215,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
struct bch_inode_unpacked inode_u;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
......@@ -414,8 +414,7 @@ __bch2_create(struct mnt_idmap *idmap,
if (!tmpfile)
mutex_lock(&dir->ei_update_lock);
bch2_trans_init(&trans, c);
bch2_trans_realloc_iters(&trans, 8);
bch2_trans_init(&trans, c, 8, 1024);
retry:
bch2_trans_begin(&trans);
......@@ -572,7 +571,7 @@ static int __bch2_link(struct bch_fs *c,
int ret;
mutex_lock(&inode->ei_update_lock);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 4, 1024);
retry:
bch2_trans_begin(&trans);
......@@ -659,7 +658,7 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
int ret;
bch2_lock_inodes(dir, inode);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 4, 1024);
retry:
bch2_trans_begin(&trans);
......@@ -870,13 +869,13 @@ static int bch2_rename2(struct mnt_idmap *idmap,
return ret;
}
bch2_trans_init(&trans, c, 8, 2048);
bch2_lock_inodes(i.src_dir,
i.dst_dir,
i.src_inode,
i.dst_inode);
bch2_trans_init(&trans, c);
if (S_ISDIR(i.src_inode->v.i_mode) &&
inode_attrs_changing(i.dst_dir, i.src_inode)) {
ret = -EXDEV;
......@@ -1045,7 +1044,7 @@ static int bch2_setattr_nonsize(struct mnt_idmap *idmap,
if (ret)
goto err;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
kfree(acl);
......@@ -1208,7 +1207,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (start + len < start)
return -EINVAL;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(ei->v.i_ino, start >> 9), 0, k, ret)
......
......@@ -451,8 +451,7 @@ static int check_extents(struct bch_fs *c)
u64 i_sectors;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
bch_verbose(c, "checking extents");
......@@ -547,8 +546,7 @@ static int check_dirents(struct bch_fs *c)
bch_verbose(c, "checking dirents");
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
hash_check_init(&h);
......@@ -704,8 +702,7 @@ static int check_xattrs(struct bch_fs *c)
hash_check_init(&h);
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
POS(BCACHEFS_ROOT_INO, 0), 0);
......@@ -918,8 +915,7 @@ static int check_directory_structure(struct bch_fs *c,
u64 d_inum;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
bch_verbose(c, "checking directory structure");
......@@ -1085,8 +1081,7 @@ static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links,
u64 d_inum;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
inc_link(c, links, range_start, range_end, BCACHEFS_ROOT_INO, false);
......@@ -1334,8 +1329,7 @@ static int bch2_gc_walk_inodes(struct bch_fs *c,
int ret = 0, ret2 = 0;
u64 nlinks_pos;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
POS(range_start, 0), 0);
......@@ -1459,8 +1453,7 @@ int bch2_fsck_walk_inodes_only(struct bch_fs *c)
struct bkey_s_c_inode inode;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_inode)
......
......@@ -391,7 +391,7 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
if (ret)
return ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(inode_nr, 0),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
......
......@@ -302,7 +302,7 @@ int bch2_write_index_default(struct bch_write_op *op)
BUG_ON(bch2_keylist_empty(keys));
bch2_verify_keylist_sorted(keys);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
bkey_start_pos(&bch2_keylist_front(keys)->k),
......@@ -1271,7 +1271,7 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
flags &= ~BCH_READ_LAST_FRAGMENT;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
rbio->pos, BTREE_ITER_SLOTS);
......@@ -1319,7 +1319,7 @@ static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
struct bkey_s_c k;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
flags &= ~BCH_READ_LAST_FRAGMENT;
flags |= BCH_READ_MUST_CLONE;
......@@ -1428,7 +1428,7 @@ static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
if (rbio->pick.crc.compression_type)
return;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
......@@ -1868,7 +1868,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
BCH_READ_USER_MAPPED;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
BUG_ON(rbio->_state);
BUG_ON(flags & BCH_READ_NODECODE);
......
......@@ -258,7 +258,7 @@ void bch2_blacklist_entries_gc(struct work_struct *work)
unsigned i, nr, new_nr;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < BTREE_ID_NR; i++) {
struct btree_iter *iter;
......
......@@ -42,8 +42,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
BKEY_PADDED(key) tmp;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
POS_MIN, BTREE_ITER_PREFETCH);
......@@ -113,7 +112,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
if (flags & BCH_FORCE_IF_METADATA_LOST)
return -EINVAL;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
closure_init_stack(&cl);
for (id = 0; id < BTREE_ID_NR; id++) {
......
......@@ -61,8 +61,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
struct keylist *keys = &op->insert_keys;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
bkey_start_pos(&bch2_keylist_front(keys)->k),
......@@ -499,7 +498,7 @@ int bch2_move_data(struct bch_fs *c,
INIT_LIST_HEAD(&ctxt.reads);
init_waitqueue_head(&ctxt.wait);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
stats->data_type = BCH_DATA_USER;
stats->btree_id = BTREE_ID_EXTENTS;
......@@ -633,7 +632,7 @@ static int bch2_move_btree(struct bch_fs *c,
enum data_cmd cmd;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
stats->data_type = BCH_DATA_BTREE;
......
......@@ -361,7 +361,7 @@ static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type)
struct bkey_s_c k;
int ret = 0;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_QUOTAS, POS(type, 0),
BTREE_ITER_PREFETCH, k, ret) {
......@@ -433,7 +433,7 @@ int bch2_fs_quota_read(struct bch_fs *c)
return ret;
}
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
......@@ -726,7 +726,7 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
bkey_quota_init(&new_quota.k_i);
new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_QUOTAS, new_quota.k.p,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
......
......@@ -214,8 +214,7 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
bool split_compressed = false;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_preload_iters(&trans);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
retry:
bch2_trans_begin(&trans);
......
......@@ -263,7 +263,7 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
if (k.k->type == KEY_TYPE_extent) {
......
......@@ -35,7 +35,7 @@ static void test_delete(struct bch_fs *c, u64 nr)
bkey_cookie_init(&k.k_i);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
BTREE_ITER_INTENT);
......@@ -67,7 +67,7 @@ static void test_delete_written(struct bch_fs *c, u64 nr)
bkey_cookie_init(&k.k_i);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
BTREE_ITER_INTENT);
......@@ -95,7 +95,7 @@ static void test_iterate(struct bch_fs *c, u64 nr)
u64 i;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
delete_test_keys(c);
......@@ -140,7 +140,7 @@ static void test_iterate_extents(struct bch_fs *c, u64 nr)
u64 i;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
delete_test_keys(c);
......@@ -190,7 +190,7 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
u64 i;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
delete_test_keys(c);
......@@ -244,7 +244,7 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
u64 i;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
delete_test_keys(c);
......@@ -305,7 +305,7 @@ static void test_peek_end(struct bch_fs *c, u64 nr)
struct btree_iter *iter;
struct bkey_s_c k;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN, 0);
......@@ -324,7 +324,7 @@ static void test_peek_end_extents(struct bch_fs *c, u64 nr)
struct btree_iter *iter;
struct bkey_s_c k;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);
......@@ -430,7 +430,7 @@ static void rand_lookup(struct bch_fs *c, u64 nr)
struct bkey_s_c k;
u64 i;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < nr; i++) {
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
......@@ -451,7 +451,7 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
int ret;
u64 i;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < nr; i++) {
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
......@@ -503,7 +503,7 @@ static void seq_insert(struct bch_fs *c, u64 nr)
bkey_cookie_init(&insert.k_i);
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
......@@ -526,7 +526,7 @@ static void seq_lookup(struct bch_fs *c, u64 nr)
struct bkey_s_c k;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret)
;
......@@ -540,7 +540,7 @@ static void seq_overwrite(struct bch_fs *c, u64 nr)
struct bkey_s_c k;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
BTREE_ITER_INTENT, k, ret) {
......
......@@ -500,16 +500,14 @@ TRACE_EVENT(copygc,
);
DECLARE_EVENT_CLASS(transaction_restart,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip),
TP_PROTO(unsigned long ip),
TP_ARGS(ip),
TP_STRUCT__entry(
__array(char, name, 16)
__field(unsigned long, ip )
),
TP_fast_assign(
memcpy(__entry->name, c->name, 16);
__entry->ip = ip;
),
......@@ -517,73 +515,97 @@ DECLARE_EVENT_CLASS(transaction_restart,
);
DEFINE_EVENT(transaction_restart, trans_restart_btree_node_reused,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_would_deadlock,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_iters_realloced,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TRACE_EVENT(trans_restart_iters_realloced,
TP_PROTO(unsigned long ip, unsigned nr),
TP_ARGS(ip, nr),
TP_STRUCT__entry(
__field(unsigned long, ip )
__field(unsigned, nr )
),
TP_fast_assign(
__entry->ip = ip;
__entry->nr = nr;
),
TP_printk("%pS nr %u", (void *) __entry->ip, __entry->nr)
);
DEFINE_EVENT(transaction_restart, trans_restart_mem_realloced,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TRACE_EVENT(trans_restart_mem_realloced,
TP_PROTO(unsigned long ip, unsigned long bytes),
TP_ARGS(ip, bytes),
TP_STRUCT__entry(
__field(unsigned long, ip )
__field(unsigned long, bytes )
),
TP_fast_assign(
__entry->ip = ip;
__entry->bytes = bytes;
),
TP_printk("%pS bytes %lu", (void *) __entry->ip, __entry->bytes)
);
DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_fault_inject,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_btree_node_split,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_mark,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_upgrade,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_iter_upgrade,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_traverse,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_atomic,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip)
TP_PROTO(unsigned long ip),
TP_ARGS(ip)
);
DECLARE_EVENT_CLASS(node_lock_fail,
......
......@@ -126,7 +126,7 @@ int bch2_xattr_get(struct bch_fs *c, struct bch_inode_info *inode,
struct bkey_s_c_xattr xattr;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_hash_lookup(&trans, bch2_xattr_hash_desc,
&inode->ei_str_hash, inode->v.i_ino,
......@@ -277,7 +277,7 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
u64 inum = dentry->d_inode->i_ino;
int ret;
bch2_trans_init(&trans, c);
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
POS(inum, 0), 0, k, ret) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment