Commit 2cbe5cfe authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Rework calling convention for marking overwrites

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b3fce09c
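The hunks below all serve one change: the bucket-marking functions now take an explicit "unsigned offset" ahead of the signed sector count, overwrites always carry BCH_BUCKET_MARK_OVERWRITE, and the BCH_EXTENT_OVERLAP_MIDDLE case collapses from two mark calls (an extra INSERT for the surviving tail, then the overwrite) into a single call tagged with the new BCH_BUCKET_MARK_OVERWRITE_SPLIT flag. A self-contained toy (not kernel code; the arithmetic is modeled on the hunks below, for uncompressed data where a live sector is a disk sector) showing that the two conventions account the same delta for a middle overwrite:

#include <stdio.h>

typedef long long s64;

int main(void)
{
	/* old extent spans [0, 16); the new one overwrites its middle [4, 12) */
	s64 old_start = 0, old_end = 16, new_start = 4, new_end = 12;

	/* old convention, OVERLAP_MIDDLE: mark the surviving tail as an
	 * INSERT, then mark the overwrite with a negative sector count */
	s64 old_way = (old_end - new_end) +	/* tail insert:	+4  */
		      (new_start - old_end);	/* overwrite:	-12 */

	/* new convention: one call carrying (offset, sectors) plus the
	 * OVERWRITE_SPLIT flag; the mark path subtracts the old extent
	 * and re-adds the two surviving fragments */
	unsigned offset	= new_start - old_start;	/*  4 */
	s64 sectors	= -(new_end - new_start);	/* -8 */
	s64 live	= old_end - old_start;		/* 16 */
	s64 new_way	= -live + offset + (live - offset + sectors);

	printf("old=%lld new=%lld\n", old_way, new_way);	/* -8 -8 */
	return 0;
}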
@@ -232,7 +232,7 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	bch2_trans_init(&trans, c, 0, 0);
 
 	for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
-		bch2_mark_key(c, k, 0, NULL, 0,
+		bch2_mark_key(c, k, 0, 0, NULL, 0,
 			      BCH_BUCKET_MARK_ALLOC_READ|
 			      BCH_BUCKET_MARK_NOATOMIC);
@@ -244,7 +244,8 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	for_each_journal_key(*journal_keys, j)
 		if (j->btree_id == BTREE_ID_ALLOC)
-			bch2_mark_key(c, bkey_i_to_s_c(j->k), 0, NULL, 0,
+			bch2_mark_key(c, bkey_i_to_s_c(j->k),
+				      0, 0, NULL, 0,
 				      BCH_BUCKET_MARK_ALLOC_READ|
 				      BCH_BUCKET_MARK_NOATOMIC);
...
@@ -173,7 +173,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 		*max_stale = max(*max_stale, ptr_stale(ca, ptr));
 	}
 
-	bch2_mark_key(c, k, k.k->size, NULL, 0, flags);
+	bch2_mark_key(c, k, 0, k.k->size, NULL, 0, flags);
 fsck_err:
 	return ret;
 }
@@ -420,7 +420,8 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 	for_each_pending_btree_node_free(c, as, d)
 		if (d->index_update_done)
-			bch2_mark_key(c, bkey_i_to_s_c(&d->key), 0, NULL, 0,
+			bch2_mark_key(c, bkey_i_to_s_c(&d->key),
+				      0, 0, NULL, 0,
 				      BCH_BUCKET_MARK_GC);
 
 	mutex_unlock(&c->btree_interior_update_lock);
...
@@ -194,7 +194,7 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
 	     : gc_pos_btree_root(as->btree_id)) >= 0 &&
 	    gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&d->key),
-				     0, NULL, 0,
+				     0, 0, NULL, 0,
 				     BCH_BUCKET_MARK_OVERWRITE|
 				     BCH_BUCKET_MARK_GC);
 }
@@ -266,11 +266,12 @@ static void bch2_btree_node_free_ondisk(struct bch_fs *c,
 {
 	BUG_ON(!pending->index_update_done);
 
-	bch2_mark_key(c, bkey_i_to_s_c(&pending->key), 0, NULL, 0,
-		      BCH_BUCKET_MARK_OVERWRITE);
+	bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
+		      0, 0, NULL, 0, BCH_BUCKET_MARK_OVERWRITE);
 
 	if (gc_visited(c, gc_phase(GC_PHASE_PENDING_DELETE)))
-		bch2_mark_key(c, bkey_i_to_s_c(&pending->key), 0, NULL, 0,
+		bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
+			      0, 0, NULL, 0,
 			      BCH_BUCKET_MARK_OVERWRITE|
 			      BCH_BUCKET_MARK_GC);
 }
@@ -1077,11 +1078,11 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 	fs_usage = bch2_fs_usage_scratch_get(c);
 
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
-			     0, &fs_usage->u, 0,
+			     0, 0, &fs_usage->u, 0,
 			     BCH_BUCKET_MARK_INSERT);
 
 	if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
-				     0, NULL, 0,
+				     0, 0, NULL, 0,
 				     BCH_BUCKET_MARK_INSERT|
 				     BCH_BUCKET_MARK_GC);
@@ -1175,12 +1176,12 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 	fs_usage = bch2_fs_usage_scratch_get(c);
 
 	bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
-			     0, &fs_usage->u, 0,
+			     0, 0, &fs_usage->u, 0,
 			     BCH_BUCKET_MARK_INSERT);
 
 	if (gc_visited(c, gc_pos_btree_node(b)))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
-				     0, NULL, 0,
+				     0, 0, NULL, 0,
 				     BCH_BUCKET_MARK_INSERT|
 				     BCH_BUCKET_MARK_GC);
@@ -2003,11 +2004,11 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 	fs_usage = bch2_fs_usage_scratch_get(c);
 
 	bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
-			     0, &fs_usage->u, 0,
+			     0, 0, &fs_usage->u, 0,
 			     BCH_BUCKET_MARK_INSERT);
 
 	if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
-				     0, NULL, 0,
+				     0, 0, NULL, 0,
 				     BCH_BUCKET_MARK_INSERT|
 				     BCH_BUCKET_MARK_GC);
...
@@ -811,23 +811,24 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 }
 
 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
-				  s64 delta)
+				  unsigned offset, s64 delta,
+				  unsigned flags)
 {
-	if (delta > 0) {
-		/*
-		 * marking a new extent, which _will have size_ @delta
-		 *
-		 * in the bch2_mark_update -> BCH_EXTENT_OVERLAP_MIDDLE
-		 * case, we haven't actually created the key we'll be inserting
-		 * yet (for the split) - so we don't want to be using
-		 * k->size/crc.live_size here:
-		 */
-		return __ptr_disk_sectors(p, delta);
+	if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
+		BUG_ON(offset + -delta > p.crc.live_size);
+
+		return -((s64) ptr_disk_sectors(p)) +
+			__ptr_disk_sectors(p, offset) +
+			__ptr_disk_sectors(p, p.crc.live_size -
+					   offset + delta);
+	} else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
+		BUG_ON(offset + -delta > p.crc.live_size);
+
+		return -((s64) ptr_disk_sectors(p)) +
+			__ptr_disk_sectors(p, p.crc.live_size +
+					   delta);
 	} else {
-		BUG_ON(-delta > p.crc.live_size);
-
-		return (s64) __ptr_disk_sectors(p, p.crc.live_size + delta) -
-			(s64) ptr_disk_sectors(p);
+		return ptr_disk_sectors(p);
 	}
 }
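Why the split case needs its own flag rather than reusing the plain OVERWRITE path: for compressed extents the surviving fragments each round up to whole disk sectors separately. A hedged sketch (it assumes __ptr_disk_sectors() scales live sectors by compressed_size/uncompressed_size rounding up, which matches the call sites above but is not quoted in this diff):

#include <stdio.h>

typedef long long s64;

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

struct ptr { unsigned live_size, compressed, uncompressed; };

static s64 disk_sectors(struct ptr p, unsigned live)
{
	return DIV_ROUND_UP((s64) live * p.compressed, p.uncompressed);
}

static s64 delta_split(struct ptr p, unsigned offset, s64 delta)
{
	/* subtract the old extent, re-add the two surviving fragments */
	return -disk_sectors(p, p.live_size) +
		disk_sectors(p, offset) +
		disk_sectors(p, p.live_size - offset + delta);
}

static s64 delta_overwrite(struct ptr p, s64 delta)
{
	/* one shrunken fragment remains */
	return -disk_sectors(p, p.live_size) +
		disk_sectors(p, p.live_size + delta);
}

int main(void)
{
	/* 16 live sectors compressed down to 5 on disk */
	struct ptr p = { .live_size = 16, .compressed = 5, .uncompressed = 16 };

	/* middle overwrite at offset 4, shrinking by 8: fragments of 4
	 * and 4 survive, each rounding up to 2 disk sectors */
	printf("split: %lld\n", delta_split(p, 4, -8));	 /* -5+2+2 = -1 */

	/* front/back overwrite shrinking by 8: one 8-sector fragment,
	 * rounding up only once */
	printf("plain: %lld\n", delta_overwrite(p, -8)); /* -5+3   = -2 */
	return 0;
}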
@@ -1006,7 +1007,8 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c,
 }
 
 static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
-			    s64 sectors, enum bch_data_type data_type,
+			    unsigned offset, s64 sectors,
+			    enum bch_data_type data_type,
 			    struct bch_fs_usage *fs_usage,
 			    unsigned journal_seq, unsigned flags)
 {
@@ -1027,7 +1029,7 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
 		s64 disk_sectors = data_type == BCH_DATA_BTREE
 			? sectors
-			: ptr_disk_sectors_delta(p, sectors);
+			: ptr_disk_sectors_delta(p, offset, sectors, flags);
 		bool stale = bch2_mark_pointer(c, p, disk_sectors, data_type,
 					       fs_usage, journal_seq, flags);
@@ -1116,7 +1118,8 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
 }
 
 int bch2_mark_key_locked(struct bch_fs *c,
-			 struct bkey_s_c k, s64 sectors,
+			 struct bkey_s_c k,
+			 unsigned offset, s64 sectors,
 			 struct bch_fs_usage *fs_usage,
 			 u64 journal_seq, unsigned flags)
 {
@@ -1137,11 +1140,11 @@ int bch2_mark_key_locked(struct bch_fs *c,
 			? c->opts.btree_node_size
 			: -c->opts.btree_node_size;
 
-		ret = bch2_mark_extent(c, k, sectors, BCH_DATA_BTREE,
+		ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_BTREE,
 				       fs_usage, journal_seq, flags);
 		break;
 	case KEY_TYPE_extent:
-		ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
+		ret = bch2_mark_extent(c, k, offset, sectors, BCH_DATA_USER,
 				       fs_usage, journal_seq, flags);
 		break;
 	case KEY_TYPE_stripe:
} }
int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k, int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
s64 sectors, unsigned offset, s64 sectors,
struct bch_fs_usage *fs_usage, struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags) u64 journal_seq, unsigned flags)
{ {
int ret; int ret;
percpu_down_read(&c->mark_lock); percpu_down_read(&c->mark_lock);
ret = bch2_mark_key_locked(c, k, sectors, ret = bch2_mark_key_locked(c, k, offset, sectors,
fs_usage, journal_seq, flags); fs_usage, journal_seq, flags);
percpu_up_read(&c->mark_lock); percpu_up_read(&c->mark_lock);
@@ -1195,8 +1198,11 @@ inline int bch2_mark_overwrite(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	struct btree *b = iter->l[0].b;
+	unsigned offset = 0;
 	s64 sectors = 0;
 
+	flags |= BCH_BUCKET_MARK_OVERWRITE;
+
 	if (btree_node_is_extents(b)
 	    ? bkey_cmp(new->k.p, bkey_start_pos(old.k)) <= 0
 	    : bkey_cmp(new->k.p, old.k->p))
@@ -1205,35 +1211,33 @@ inline int bch2_mark_overwrite(struct btree_trans *trans,
 	if (btree_node_is_extents(b)) {
 		switch (bch2_extent_overlap(&new->k, old.k)) {
 		case BCH_EXTENT_OVERLAP_ALL:
+			offset = 0;
 			sectors = -((s64) old.k->size);
 			break;
 		case BCH_EXTENT_OVERLAP_BACK:
+			offset = bkey_start_offset(&new->k) -
+				bkey_start_offset(old.k);
 			sectors = bkey_start_offset(&new->k) -
 				old.k->p.offset;
 			break;
 		case BCH_EXTENT_OVERLAP_FRONT:
+			offset = 0;
 			sectors = bkey_start_offset(old.k) -
 				new->k.p.offset;
 			break;
 		case BCH_EXTENT_OVERLAP_MIDDLE:
-			sectors = old.k->p.offset - new->k.p.offset;
-			BUG_ON(sectors <= 0);
-
-			bch2_mark_key_locked(c, old, sectors,
-				fs_usage, trans->journal_res.seq,
-				BCH_BUCKET_MARK_INSERT|flags);
-
-			sectors = bkey_start_offset(&new->k) -
-				old.k->p.offset;
+			offset = bkey_start_offset(&new->k) -
+				bkey_start_offset(old.k);
+			sectors = -((s64) new->k.size);
+			flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
 			break;
 		}
 
 		BUG_ON(sectors >= 0);
 	}
 
-	return bch2_mark_key_locked(c, old, sectors, fs_usage,
-				    trans->journal_res.seq,
-				    BCH_BUCKET_MARK_OVERWRITE|flags) ?: 1;
+	return bch2_mark_key_locked(c, old, offset, sectors, fs_usage,
+				    trans->journal_res.seq, flags) ?: 1;
 }
 
 int bch2_mark_update(struct btree_trans *trans,
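The four overlap cases above reduce to a small table: offset is where the overwrite begins inside the old extent, sectors is the (negative) size change, and only the middle case sets OVERWRITE_SPLIT. A condensed restatement with plain integers standing in for the bkey accessors (illustrative, not kernel code):

#include <stdio.h>

typedef long long s64;

enum overlap { ALL, BACK, FRONT, MIDDLE };

/* old spans [old_start, old_end); new spans [new_start, new_end) */
static void overwrite_args(enum overlap o,
			   s64 old_start, s64 old_end,
			   s64 new_start, s64 new_end,
			   unsigned *offset, s64 *sectors, int *split)
{
	*split = 0;
	switch (o) {
	case ALL:	/* old completely covered */
		*offset	 = 0;
		*sectors = -(old_end - old_start);
		break;
	case BACK:	/* new overwrites the tail of old */
		*offset	 = new_start - old_start;
		*sectors = new_start - old_end;
		break;
	case FRONT:	/* new overwrites the head of old */
		*offset	 = 0;
		*sectors = old_start - new_end;
		break;
	case MIDDLE:	/* new punches a hole in old: a single call now,
			 * where the old code issued an INSERT then an
			 * overwrite */
		*offset	 = new_start - old_start;
		*sectors = -(new_end - new_start);
		*split	 = 1;
		break;
	}
}

int main(void)
{
	unsigned offset; s64 sectors; int split;

	/* old = [0, 16), new = [4, 12): middle overlap */
	overwrite_args(MIDDLE, 0, 16, 4, 12, &offset, &sectors, &split);
	printf("offset=%u sectors=%lld split=%d\n", offset, sectors, split);
	return 0;
}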
@@ -1251,10 +1255,12 @@ int bch2_mark_update(struct btree_trans *trans,
 	if (!btree_node_type_needs_gc(iter->btree_id))
 		return 0;
 
+	EBUG_ON(btree_node_is_extents(b) &&
+		!bch2_extent_is_atomic(insert->k, insert->iter));
+
 	if (!(trans->flags & BTREE_INSERT_NOMARK_INSERT))
 		bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k),
-			bpos_min(insert->k->k.p, b->key.k.p).offset -
-			bkey_start_offset(&insert->k->k),
+			0, insert->k->k.size,
 			fs_usage, trans->journal_res.seq,
 			BCH_BUCKET_MARK_INSERT|flags);
@@ -1519,8 +1525,9 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
 }
 
 static int bch2_trans_mark_extent(struct btree_trans *trans,
-			struct bkey_s_c k,
-			s64 sectors, enum bch_data_type data_type)
+			struct bkey_s_c k, unsigned offset,
+			s64 sectors, unsigned flags,
+			enum bch_data_type data_type)
 {
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
@@ -1540,7 +1547,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
 		s64 disk_sectors = data_type == BCH_DATA_BTREE
 			? sectors
-			: ptr_disk_sectors_delta(p, sectors);
+			: ptr_disk_sectors_delta(p, offset, sectors, flags);
 
 		ret = bch2_trans_mark_pointer(trans, p, disk_sectors,
 					      data_type);
@@ -1575,7 +1582,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
 }
 
 int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
-			s64 sectors, unsigned flags)
+			unsigned offset, s64 sectors, unsigned flags)
 {
 	struct replicas_delta_list *d;
 	struct bch_fs *c = trans->c;
@@ -1586,11 +1593,11 @@ int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
 			? c->opts.btree_node_size
 			: -c->opts.btree_node_size;
 
-		return bch2_trans_mark_extent(trans, k, sectors,
-					      BCH_DATA_BTREE);
+		return bch2_trans_mark_extent(trans, k, offset, sectors,
+					      flags, BCH_DATA_BTREE);
 	case KEY_TYPE_extent:
-		return bch2_trans_mark_extent(trans, k, sectors,
-					      BCH_DATA_USER);
+		return bch2_trans_mark_extent(trans, k, offset, sectors,
+					      flags, BCH_DATA_USER);
 	case KEY_TYPE_inode:
 		d = replicas_deltas_realloc(trans, 0);
@@ -1629,11 +1636,11 @@ int bch2_trans_mark_update(struct btree_trans *trans,
 	if (!btree_node_type_needs_gc(iter->btree_id))
 		return 0;
 
-	ret = bch2_trans_mark_key(trans,
-			bkey_i_to_s_c(insert),
-			bpos_min(insert->k.p, b->key.k.p).offset -
-			bkey_start_offset(&insert->k),
-			BCH_BUCKET_MARK_INSERT);
+	EBUG_ON(btree_node_is_extents(b) &&
+		!bch2_extent_is_atomic(insert, iter));
+
+	ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
+			0, insert->k.size, BCH_BUCKET_MARK_INSERT);
 	if (ret)
 		return ret;
@@ -1641,7 +1648,9 @@ int bch2_trans_mark_update(struct btree_trans *trans,
 			KEY_TYPE_discard))) {
 		struct bkey		unpacked;
 		struct bkey_s_c		k;
+		unsigned		offset = 0;
 		s64			sectors = 0;
+		unsigned		flags = BCH_BUCKET_MARK_OVERWRITE;
 
 		k = bkey_disassemble(b, _k, &unpacked);
@@ -1653,35 +1662,32 @@ int bch2_trans_mark_update(struct btree_trans *trans,
 		if (btree_node_is_extents(b)) {
 			switch (bch2_extent_overlap(&insert->k, k.k)) {
 			case BCH_EXTENT_OVERLAP_ALL:
+				offset = 0;
 				sectors = -((s64) k.k->size);
 				break;
 			case BCH_EXTENT_OVERLAP_BACK:
+				offset = bkey_start_offset(&insert->k) -
+					bkey_start_offset(k.k);
 				sectors = bkey_start_offset(&insert->k) -
 					k.k->p.offset;
 				break;
 			case BCH_EXTENT_OVERLAP_FRONT:
+				offset = 0;
 				sectors = bkey_start_offset(k.k) -
 					insert->k.p.offset;
 				break;
 			case BCH_EXTENT_OVERLAP_MIDDLE:
-				sectors = k.k->p.offset - insert->k.p.offset;
-				BUG_ON(sectors <= 0);
-
-				ret = bch2_trans_mark_key(trans, k, sectors,
-						BCH_BUCKET_MARK_INSERT);
-				if (ret)
-					return ret;
-
-				sectors = bkey_start_offset(&insert->k) -
-					k.k->p.offset;
+				offset = bkey_start_offset(&insert->k) -
+					bkey_start_offset(k.k);
+				sectors = -((s64) insert->k.size);
+				flags |= BCH_BUCKET_MARK_OVERWRITE_SPLIT;
 				break;
 			}
 
 			BUG_ON(sectors >= 0);
 		}
 
-		ret = bch2_trans_mark_key(trans, k, sectors,
-				BCH_BUCKET_MARK_OVERWRITE);
+		ret = bch2_trans_mark_key(trans, k, offset, sectors, flags);
 		if (ret)
 			return ret;
...
@@ -251,14 +251,15 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 #define BCH_BUCKET_MARK_INSERT			(1 << 0)
 #define BCH_BUCKET_MARK_OVERWRITE		(1 << 1)
-#define BCH_BUCKET_MARK_BUCKET_INVALIDATE	(1 << 2)
-#define BCH_BUCKET_MARK_GC			(1 << 3)
-#define BCH_BUCKET_MARK_ALLOC_READ		(1 << 4)
-#define BCH_BUCKET_MARK_NOATOMIC		(1 << 5)
+#define BCH_BUCKET_MARK_OVERWRITE_SPLIT		(1 << 2)
+#define BCH_BUCKET_MARK_BUCKET_INVALIDATE	(1 << 3)
+#define BCH_BUCKET_MARK_GC			(1 << 4)
+#define BCH_BUCKET_MARK_ALLOC_READ		(1 << 5)
+#define BCH_BUCKET_MARK_NOATOMIC		(1 << 6)
 
-int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, s64,
+int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, unsigned, s64,
 			 struct bch_fs_usage *, u64, unsigned);
-int bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64,
+int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned, s64,
 		  struct bch_fs_usage *, u64, unsigned);
 int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage_online *,
 			struct disk_reservation *, unsigned);
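Note the bit renumbering above: OVERWRITE_SPLIT takes bit 2 and every later flag shifts up one, so the raw numeric flag values are not stable across this commit (only the names are). A trivial compile-time check of the new layout (values copied from the hunk; C11 static_assert):

#include <assert.h>

#define BCH_BUCKET_MARK_INSERT			(1 << 0)
#define BCH_BUCKET_MARK_OVERWRITE		(1 << 1)
#define BCH_BUCKET_MARK_OVERWRITE_SPLIT		(1 << 2)
#define BCH_BUCKET_MARK_BUCKET_INVALIDATE	(1 << 3)
#define BCH_BUCKET_MARK_GC			(1 << 4)
#define BCH_BUCKET_MARK_ALLOC_READ		(1 << 5)
#define BCH_BUCKET_MARK_NOATOMIC		(1 << 6)

int main(void)
{
	/* GC moved from bit 3 to bit 4, ALLOC_READ from 4 to 5,
	 * NOATOMIC from 5 to 6 */
	static_assert(BCH_BUCKET_MARK_GC == 16, "GC is now bit 4");
	return 0;
}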
@@ -272,7 +273,8 @@ int bch2_mark_update(struct btree_trans *, struct btree_insert_entry *,
 void bch2_replicas_delta_list_apply(struct bch_fs *,
 				    struct bch_fs_usage *,
 				    struct replicas_delta_list *);
-int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, s64, unsigned);
+int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c,
+			unsigned, s64, unsigned);
 int bch2_trans_mark_update(struct btree_trans *,
 			   struct btree_iter *iter,
 			   struct bkey_i *insert);
...
@@ -1312,7 +1312,7 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
 			break;
 		}
 
-		bch2_mark_key(c, k, 0, NULL, 0,
+		bch2_mark_key(c, k, 0, 0, NULL, 0,
 			      BCH_BUCKET_MARK_ALLOC_READ|
 			      BCH_BUCKET_MARK_NOATOMIC);
 	}
...
@@ -295,7 +295,7 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
 	if (split_compressed) {
 		ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k),
-				-((s64) k->k.size),
+				0, -((s64) k->k.size),
 				BCH_BUCKET_MARK_OVERWRITE) ?:
 			bch2_trans_commit(&trans, &disk_res, NULL,
 				BTREE_INSERT_ATOMIC|
...