Commit 2a9101a9 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Refactor bch2_trans_commit() path

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 8f196539
@@ -301,7 +301,6 @@ do { \
 	x(btree_node_sort) \
 	x(btree_node_read) \
 	x(btree_gc) \
-	x(btree_update) \
 	x(btree_lock_contended_read) \
 	x(btree_lock_contended_intent) \
 	x(btree_lock_contended_write) \
@@ -62,10 +62,10 @@ bool __bch2_compact_whiteouts(struct bch_fs *, struct btree *, enum compact_mode
 
 static inline unsigned should_compact_bset_lazy(struct btree *b, struct bset_tree *t)
 {
-	unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
-	unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];
+	unsigned total_u64s = bset_u64s(t);
+	unsigned dead_u64s = total_u64s - b->nr.bset_u64s[t - b->set];
 
-	return dead_u64s > 128 && dead_u64s * 3 > bset_u64s;
+	return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
 }
 
 static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
@@ -48,6 +48,11 @@ static inline int btree_iter_err(const struct btree_iter *iter)
 
 /* Iterate over iters within a transaction: */
 
+#define trans_for_each_iter_all(_trans, _iter) \
+	for (_iter = (_trans)->iters; \
+	     _iter < (_trans)->iters + (_trans)->nr_iters; \
+	     _iter++)
+
 static inline struct btree_iter *
 __trans_next_iter(struct btree_trans *trans, unsigned idx)
 {
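The added trans_for_each_iter_all() visits every iterator slot allocated in a transaction, live or not, which is what the refactored commit path needs when it has to touch all iterators. A minimal usage sketch follows; it is not part of this commit, the dump_trans_iters() helper is hypothetical, and the iter->btree_id / iter->pos accesses are assumptions about the btree_iter layout of this era:

	/* Hypothetical debugging helper (illustration only): walk every iterator
	 * slot with the new macro and report which ones are live. */
	static void dump_trans_iters(struct btree_trans *trans)
	{
		struct btree_iter *iter;

		trans_for_each_iter_all(trans, iter) {
			unsigned idx = iter - trans->iters;

			printk(KERN_DEBUG "iter %u: btree %u pos %llu:%llu live %d\n",
			       idx, (unsigned) iter->btree_id,
			       (unsigned long long) iter->pos.inode,
			       (unsigned long long) iter->pos.offset,
			       (trans->iters_live & (1ULL << idx)) != 0);
		}
	}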
@@ -255,7 +255,6 @@ struct btree_insert_entry {
 struct btree_trans {
 	struct bch_fs *c;
 	unsigned long ip;
-	u64 commit_start;
 
 	u64 iters_linked;
 	u64 iters_live;
@@ -283,12 +282,11 @@ struct btree_trans {
 	struct disk_reservation *disk_res;
 	unsigned flags;
 	unsigned journal_u64s;
-	struct replicas_delta_list *fs_usage_deltas;
 
 	struct btree_iter iters_onstack[2];
 	struct btree_insert_entry updates_onstack[6];
 	u8 updates_sorted_onstack[6];
+
+	struct replicas_delta_list *fs_usage_deltas;
 };
 
 #define BTREE_FLAG(flag) \
@@ -420,6 +418,12 @@ static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
 	__btree_node_offset_to_key(_b, (_t)->end_offset); \
 })
 
+static inline unsigned bset_u64s(struct bset_tree *t)
+{
+	return t->end_offset - t->data_offset -
+		sizeof(struct bset) / sizeof(u64);
+}
+
 static inline unsigned bset_byte_offset(struct btree *b, void *i)
 {
 	return i - (void *) b->data;
@@ -93,9 +93,30 @@ int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
 int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
 			       struct btree *, struct bkey_i_btree_ptr *);
 
-int bch2_trans_commit(struct btree_trans *,
-		      struct disk_reservation *,
-		      u64 *, unsigned);
+int __bch2_trans_commit(struct btree_trans *);
+
+/**
+ * bch2_trans_commit - insert keys at given iterator positions
+ *
+ * This is the main entry point for btree updates.
+ *
+ * Return values:
+ * -EINTR: locking changed, this function should be called again. Only returned
+ *  if passed BTREE_INSERT_ATOMIC.
+ * -EROFS: filesystem read only
+ * -EIO: journal or btree node IO error
+ */
+static inline int bch2_trans_commit(struct btree_trans *trans,
+				    struct disk_reservation *disk_res,
+				    u64 *journal_seq,
+				    unsigned flags)
+{
+	trans->disk_res = disk_res;
+	trans->journal_seq = journal_seq;
+	trans->flags = flags;
+
+	return __bch2_trans_commit(trans);
+}
 
 static inline void bch2_trans_update(struct btree_trans *trans,
 				     struct btree_iter *iter,
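For context, a hedged sketch of how a caller drives the new wrapper: updates are queued with bch2_trans_update(), then bch2_trans_commit() stashes disk_res, journal_seq and flags in the transaction and hands off to __bch2_trans_commit(). This is illustration only, not code from the commit; the bch2_trans_init() / bch2_trans_get_iter() signatures and the BTREE_ID_EXTENTS / BTREE_ITER_INTENT usage are assumptions about the surrounding API of this era:

	/* Illustrative only: insert one key through the new inline wrapper. */
	static int example_insert_key(struct bch_fs *c, struct bkey_i *k)
	{
		struct btree_trans trans;
		struct btree_iter *iter;
		int ret;

		bch2_trans_init(&trans, c, 0, 0);

		iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
					   bkey_start_pos(&k->k), BTREE_ITER_INTENT);

		bch2_trans_update(&trans, iter, k);

		/* Per the comment above, -EINTR is only returned when
		 * BTREE_INSERT_ATOMIC is passed in flags. */
		ret = bch2_trans_commit(&trans, NULL, NULL, BTREE_INSERT_NOFAIL);

		return bch2_trans_exit(&trans) ?: ret;
	}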
@@ -2720,20 +2720,26 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
 			     loff_t offset, loff_t len)
 {
 	struct bch_inode_info *inode = file_bch_inode(file);
+	struct bch_fs *c = inode->v.i_sb->s_fs_info;
+	long ret;
 
-	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
-		return bchfs_fallocate(inode, mode, offset, len);
-
-	if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
-		return bchfs_fpunch(inode, offset, len);
+	if (!percpu_ref_tryget(&c->writes))
+		return -EROFS;
 
-	if (mode == FALLOC_FL_INSERT_RANGE)
-		return bchfs_fcollapse_finsert(inode, offset, len, true);
+	if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
+		ret = bchfs_fallocate(inode, mode, offset, len);
+	else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
+		ret = bchfs_fpunch(inode, offset, len);
+	else if (mode == FALLOC_FL_INSERT_RANGE)
+		ret = bchfs_fcollapse_finsert(inode, offset, len, true);
+	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
+		ret = bchfs_fcollapse_finsert(inode, offset, len, false);
+	else
+		ret = -EOPNOTSUPP;
 
-	if (mode == FALLOC_FL_COLLAPSE_RANGE)
-		return bchfs_fcollapse_finsert(inode, offset, len, false);
+	percpu_ref_put(&c->writes);
 
-	return -EOPNOTSUPP;
+	return ret;
 }
 
 static void mark_range_unallocated(struct bch_inode_info *inode,
@@ -310,6 +310,7 @@ int bch2_extent_update(struct btree_trans *trans,
 	bch2_trans_update(trans, iter, k);
 
 	ret = bch2_trans_commit(trans, disk_res, journal_seq,
+				BTREE_INSERT_NOCHECK_RW|
 				BTREE_INSERT_NOFAIL|
 				BTREE_INSERT_ATOMIC|
 				BTREE_INSERT_USE_RESERVE);
@@ -271,7 +271,7 @@ static inline void bch2_journal_res_put(struct journal *j,
 	if (!res->ref)
 		return;
 
-	lock_release(&j->res_map, _RET_IP_);
+	lock_release(&j->res_map, _THIS_IP_);
 
 	while (res->u64s)
 		bch2_journal_add_entry(j, res,
@@ -166,6 +166,9 @@ s64 bch2_remap_range(struct bch_fs *c,
 	u64 src_done, dst_done;
 	int ret = 0, ret2 = 0;
 
+	if (!percpu_ref_tryget(&c->writes))
+		return -EROFS;
+
 	if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
 		mutex_lock(&c->sb_lock);
 		if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
@@ -295,5 +298,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 
 	ret = bch2_trans_exit(&trans) ?: ret;
 
+	percpu_ref_put(&c->writes);
+
 	return dst_done ?: ret ?: ret2;
 }