Commit e2d9912c authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: bch2_extent_trim_atomic()

Prep work for extents insert hook removal
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 581edb63
......@@ -1298,6 +1298,17 @@ extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
return __extent_insert_advance_pos(s, next_pos, k);
}
/*
 * Trim @k so the whole update fits inside the leaf node the iterator is
 * currently positioned at, making the insert atomic with respect to a
 * single btree node.
 */
void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
	struct btree *leaf = iter->l[0].b;

	/* Iterator must already be traversed to (at least) the peek state: */
	BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);

	/* Clip the extent's end to the end of this leaf node: */
	bch2_cut_back(leaf->key.k.p, &k->k);

	/* The start must also lie within the node, or the trim was invalid: */
	BUG_ON(bkey_cmp(bkey_start_pos(&k->k), leaf->data->min_key) < 0);
}
enum btree_insert_ret
bch2_extent_can_insert(struct btree_insert *trans,
struct btree_insert_entry *insert,
......@@ -1311,6 +1322,9 @@ bch2_extent_can_insert(struct btree_insert *trans,
struct bkey_s_c k;
int sectors;
BUG_ON(trans->flags & BTREE_INSERT_ATOMIC &&
!bch2_extent_is_atomic(&insert->k->k, insert->iter));
/*
* We avoid creating whiteouts whenever possible when deleting, but
* those optimizations mean we may potentially insert two whiteouts
......
......@@ -62,6 +62,17 @@ int bch2_extent_pick_ptr(struct bch_fs *, struct bkey_s_c,
struct bch_devs_mask *,
struct extent_pick_ptr *);
void bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
/*
 * Returns true if @k lies entirely within the leaf node the iterator
 * points at — i.e. inserting it touches only a single btree node.
 */
static inline bool bch2_extent_is_atomic(struct bkey *k,
					 struct btree_iter *iter)
{
	struct btree *node = iter->l[0].b;
	bool end_inside   = bkey_cmp(k->p, node->key.k.p) <= 0;
	bool start_inside = bkey_cmp(bkey_start_pos(k),
				     node->data->min_key) >= 0;

	return end_inside && start_inside;
}
enum btree_insert_ret
bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *,
unsigned *);
......
......@@ -6,6 +6,7 @@
#include "buckets.h"
#include "clock.h"
#include "error.h"
#include "extents.h"
#include "fs.h"
#include "fs-io.h"
#include "fsck.h"
......@@ -430,17 +431,22 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
hook.need_inode_update = false;
do {
/* XXX: inode->i_size locking */
k = bch2_keylist_front(keys);
if (min(k->k.p.offset << 9, op->new_i_size) >
op->inode->ei_inode.bi_size)
hook.need_inode_update = true;
BKEY_PADDED(k) tmp;
/* optimization for fewer transaction restarts: */
ret = bch2_btree_iter_traverse(extent_iter);
if (ret)
goto err;
bkey_copy(&tmp.k, bch2_keylist_front(keys));
k = &tmp.k;
bch2_extent_trim_atomic(k, extent_iter);
/* XXX: inode->i_size locking */
if (min(k->k.p.offset << 9, op->new_i_size) >
op->inode->ei_inode.bi_size)
hook.need_inode_update = true;
if (hook.need_inode_update) {
struct bkey_s_c inode;
......@@ -515,8 +521,10 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
if (hook.need_inode_update)
op->inode->ei_inode = hook.inode_u;
BUG_ON(bkey_cmp(extent_iter->pos, k->k.p) < 0);
bch2_keylist_pop_front(keys);
if (bkey_cmp(extent_iter->pos, bch2_keylist_front(keys)->k.p) < 0)
bch2_cut_front(extent_iter->pos, bch2_keylist_front(keys));
else
bch2_keylist_pop_front(keys);
} while (!bch2_keylist_empty(keys));
bch2_trans_exit(&trans);
......@@ -2458,6 +2466,12 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
bch2_cut_front(src->pos, &copy.k);
copy.k.k.p.offset -= len >> 9;
ret = bch2_btree_iter_traverse(dst);
if (ret)
goto btree_iter_err;
bch2_extent_trim_atomic(&copy.k, dst);
BUG_ON(bkey_cmp(dst->pos, bkey_start_pos(&copy.k.k)));
ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment