Commit b35b1925 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Move key marking out of extents.c

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent af9d3bc2
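
In outline: this commit moves sector accounting ("marking") out of the
extents fixup path and into a single bch2_mark_update() call made from
btree_insert_key_leaf(), before the btree node is modified. A minimal
sketch of the resulting insert path, pieced together from the diff below
(simplified; the real function also tracks u64s growth afterwards):

	/* Sketch only -- not the verbatim kernel code. */
	static enum btree_insert_ret
	btree_insert_key_leaf(struct btree_insert *trans,
			      struct btree_insert_entry *insert)
	{
		struct btree *b = insert->iter->l[0].b;

		/* new: account the insert and everything it overwrites, once */
		bch2_mark_update(trans, insert);

		/* the fixup paths below no longer do any accounting */
		return !btree_node_is_extents(b)
			? bch2_insert_fixup_key(trans, insert)
			: bch2_insert_fixup_extent(trans, insert);
	}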
fs/bcachefs/btree_update_leaf.c
@@ -6,6 +6,7 @@
 #include "btree_io.h"
 #include "btree_iter.h"
 #include "btree_locking.h"
+#include "buckets.h"
 #include "debug.h"
 #include "extents.h"
 #include "journal.h"
@@ -204,6 +205,8 @@ btree_insert_key_leaf(struct btree_insert *trans,
 	int old_live_u64s = b->nr.live_u64s;
 	int live_u64s_added, u64s_added;
 
+	bch2_mark_update(trans, insert);
+
 	ret = !btree_node_is_extents(b)
 		? bch2_insert_fixup_key(trans, insert)
 		: bch2_insert_fixup_extent(trans, insert);
...
fs/bcachefs/buckets.c
@@ -65,7 +65,9 @@
 #include "bcachefs.h"
 #include "alloc_background.h"
+#include "bset.h"
 #include "btree_gc.h"
+#include "btree_update.h"
 #include "buckets.h"
 #include "error.h"
 #include "movinggc.h"
@@ -346,7 +348,8 @@ void bch2_fs_usage_apply(struct bch_fs *c,
 	 * reservation:
 	 */
 	should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
-	if (WARN_ON(should_not_have_added > 0)) {
+	if (WARN_ONCE(should_not_have_added > 0,
+		      "disk usage increased without a reservation")) {
 		atomic64_sub(should_not_have_added, &c->sectors_available);
 		added -= should_not_have_added;
 	}
@@ -642,9 +645,6 @@ static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
 			     struct bch_fs_usage *stats,
 			     u64 journal_seq, unsigned flags)
 {
-	unsigned replicas = bch2_extent_nr_dirty_ptrs(k);
-
-	BUG_ON(replicas && replicas - 1 > ARRAY_SIZE(stats->replicas));
 	BUG_ON(!sectors);
 
 	switch (k.k->type) {
@@ -653,38 +653,43 @@ static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
 		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
 		const union bch_extent_entry *entry;
 		struct extent_ptr_decoded p;
+		s64 cached_sectors = 0;
+		s64 dirty_sectors = 0;
+		unsigned replicas = 0;
 
 		extent_for_each_ptr_decode(e, p, entry) {
 			s64 disk_sectors = ptr_disk_sectors(e, p, sectors);
 
-			/*
-			 * fs level usage (which determines free space) is in
-			 * uncompressed sectors, until copygc + compression is
-			 * sorted out:
-			 *
-			 * note also that we always update @fs_usage, even when
-			 * we otherwise wouldn't do anything because gc is
-			 * running - this is because the caller still needs to
-			 * account w.r.t. its disk reservation. It is caller's
-			 * responsibility to not apply @fs_usage if gc is in
-			 * progress.
-			 */
-			stats->replicas
-				[!p.ptr.cached && replicas ? replicas - 1 : 0].data
-				[!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
-				disk_sectors;
-
 			bch2_mark_pointer(c, e, p, disk_sectors, data_type,
 					  stats, journal_seq, flags);
+
+			if (!p.ptr.cached)
+				replicas++;
+
+			if (p.ptr.cached)
+				cached_sectors += disk_sectors;
+			else
+				dirty_sectors += disk_sectors;
 		}
 
+		replicas = clamp_t(unsigned, replicas,
+				   1, ARRAY_SIZE(stats->replicas));
+
+		stats->replicas[0].data[BCH_DATA_CACHED] += cached_sectors;
+		stats->replicas[replicas - 1].data[data_type] += dirty_sectors;
 		break;
 	}
-	case BCH_RESERVATION:
-		if (replicas)
-			stats->replicas[replicas - 1].persistent_reserved +=
-				sectors * replicas;
+	case BCH_RESERVATION: {
+		unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
+
+		sectors *= replicas;
+		replicas = clamp_t(unsigned, replicas,
+				   1, ARRAY_SIZE(stats->replicas));
+
+		stats->replicas[replicas - 1].persistent_reserved += sectors;
 		break;
 	}
+	}
 }
 
 void bch2_mark_key(struct bch_fs *c,
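
A worked pass through the new per-extent accounting above, with made-up
values:

	/*
	 * Hypothetical key: two dirty pointers and one cached pointer,
	 * each covering 8 disk sectors. The loop ends with
	 *
	 *	replicas       = 2	(cached pointers aren't counted)
	 *	dirty_sectors  = 16
	 *	cached_sectors = 8
	 *
	 * so the totals become
	 *
	 *	stats->replicas[0].data[BCH_DATA_CACHED] += 8;
	 *	stats->replicas[1].data[data_type]       += 16;
	 *
	 * The clamp_t() only matters when every pointer is cached
	 * (replicas == 0) or a key claims more replicas than
	 * stats->replicas has slots.
	 */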
@@ -748,6 +753,76 @@ void bch2_mark_key(struct bch_fs *c,
 	percpu_up_read(&c->usage_lock);
 }
 
+void bch2_mark_update(struct btree_insert *trans,
+		      struct btree_insert_entry *insert)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_iter *iter = insert->iter;
+	struct btree *b = iter->l[0].b;
+	struct btree_node_iter node_iter = iter->l[0].iter;
+	struct bch_fs_usage stats = { 0 };
+	struct gc_pos pos = gc_pos_btree_node(b);
+	struct bkey_packed *_k;
+
+	if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
+		bch2_mark_key(c, btree_node_type(b), bkey_i_to_s_c(insert->k),
+			      true,
+			      bpos_min(insert->k->k.p, b->key.k.p).offset -
+			      bkey_start_offset(&insert->k->k),
+			      pos, &stats, trans->journal_res.seq, 0);
+
+	while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
+						      KEY_TYPE_DISCARD))) {
+		struct bkey unpacked;
+		struct bkey_s_c k;
+		s64 sectors = 0;
+
+		k = bkey_disassemble(b, _k, &unpacked);
+
+		if (btree_node_is_extents(b)
+		    ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0
+		    : bkey_cmp(insert->k->k.p, k.k->p))
+			break;
+
+		if (btree_node_is_extents(b)) {
+			switch (bch2_extent_overlap(&insert->k->k, k.k)) {
+			case BCH_EXTENT_OVERLAP_ALL:
+				sectors = -((s64) k.k->size);
+				break;
+			case BCH_EXTENT_OVERLAP_BACK:
+				sectors = bkey_start_offset(&insert->k->k) -
+					k.k->p.offset;
+				break;
+			case BCH_EXTENT_OVERLAP_FRONT:
+				sectors = bkey_start_offset(k.k) -
+					insert->k->k.p.offset;
+				break;
+			case BCH_EXTENT_OVERLAP_MIDDLE:
+				sectors = k.k->p.offset - insert->k->k.p.offset;
+				BUG_ON(sectors <= 0);
+
+				bch2_mark_key(c, btree_node_type(b), k,
+					      true, sectors,
+					      pos, &stats, trans->journal_res.seq, 0);
+
+				sectors = bkey_start_offset(&insert->k->k) -
+					k.k->p.offset;
+				break;
+			}
+
+			BUG_ON(sectors >= 0);
+		}
+
+		bch2_mark_key(c, btree_node_type(b), k,
+			      false, sectors,
+			      pos, &stats, trans->journal_res.seq, 0);
+
+		bch2_btree_node_iter_advance(&node_iter, b);
+	}
+
+	bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
+}
+
 /* Disk reservations: */
 
 static u64 __recalc_sectors_available(struct bch_fs *c)
...
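
The sector deltas in bch2_mark_update()'s overlap switch can be
sanity-checked with made-up offsets (extents are half-open [start,end)):

	/*
	 * Inserting [20,30) over an existing k = [10,40) is
	 * BCH_EXTENT_OVERLAP_MIDDLE:
	 *
	 *	sectors = k.k->p.offset - insert->k->k.p.offset
	 *		= 40 - 30 = +10
	 *		-> mark the back piece [30,40) the split creates
	 *	sectors = bkey_start_offset(&insert->k->k) - k.k->p.offset
	 *		= 20 - 40 = -20
	 *		-> unmark [20,40), the part of k being cut away
	 *
	 * Net for k: -10 sectors, exactly the overlap [20,30); the insert
	 * itself was marked +10 before the loop. FRONT, BACK and ALL are
	 * the same idea with a single negative delta.
	 */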
fs/bcachefs/buckets.h
@@ -213,6 +213,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 void bch2_mark_key(struct bch_fs *, enum bkey_type, struct bkey_s_c,
 		   bool, s64, struct gc_pos,
 		   struct bch_fs_usage *, u64, unsigned);
+void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
 
 void bch2_recalc_sectors_available(struct bch_fs *);
...
fs/bcachefs/extents.c
@@ -1009,7 +1009,6 @@ struct extent_insert_state {
 	struct btree_insert	*trans;
 	struct btree_insert_entry *insert;
 	struct bpos		committed;
-	struct bch_fs_usage	stats;
 
 	/* for deleting: */
 	struct bkey_i		whiteout;
@@ -1018,54 +1017,6 @@ struct extent_insert_state {
 	bool			deleting;
 };
 
-static void bch2_add_sectors(struct extent_insert_state *s,
-			     struct bkey_s_c k, u64 offset, s64 sectors)
-{
-	struct bch_fs *c = s->trans->c;
-	struct btree *b = s->insert->iter->l[0].b;
-
-	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
-
-	if (!sectors)
-		return;
-
-	bch2_mark_key(c, BKEY_TYPE_EXTENTS, k, sectors > 0, sectors,
-		      gc_pos_btree_node(b), &s->stats,
-		      s->trans->journal_res.seq, 0);
-}
-
-static void bch2_subtract_sectors(struct extent_insert_state *s,
-				  struct bkey_s_c k, u64 offset, s64 sectors)
-{
-	bch2_add_sectors(s, k, offset, -sectors);
-}
-
-/* These wrappers subtract exactly the sectors that we're removing from @k */
-static void bch2_cut_subtract_back(struct extent_insert_state *s,
-				   struct bpos where, struct bkey_s k)
-{
-	bch2_subtract_sectors(s, k.s_c, where.offset,
-			      k.k->p.offset - where.offset);
-	bch2_cut_back(where, k.k);
-}
-
-static void bch2_cut_subtract_front(struct extent_insert_state *s,
-				    struct bpos where, struct bkey_s k)
-{
-	bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
-			      where.offset - bkey_start_offset(k.k));
-	__bch2_cut_front(where, k);
-}
-
-static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
-{
-	if (k.k->size)
-		bch2_subtract_sectors(s, k.s_c,
-				      bkey_start_offset(k.k), k.k->size);
-
-	k.k->size = 0;
-	k.k->type = KEY_TYPE_DELETED;
-}
-
 static bool bch2_extent_merge_inline(struct bch_fs *,
 				     struct btree_iter *,
 				     struct bkey_packed *,
@@ -1166,11 +1117,7 @@ static void extent_insert_committed(struct extent_insert_state *s)
 	if (s->deleting)
 		split.k.k.type = KEY_TYPE_DISCARD;
 
-	if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
-		bch2_cut_subtract_back(s, s->committed,
-				       bkey_i_to_s(&split.k));
-	else
-		bch2_cut_back(s->committed, &split.k.k);
+	bch2_cut_back(s->committed, &split.k.k);
 
 	if (!bkey_cmp(s->committed, iter->pos))
 		return;
@@ -1290,7 +1237,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
 	switch (overlap) {
 	case BCH_EXTENT_OVERLAP_FRONT:
 		/* insert overlaps with start of k: */
-		bch2_cut_subtract_front(s, insert->k.p, k);
+		__bch2_cut_front(insert->k.p, k);
 		BUG_ON(bkey_deleted(k.k));
 		extent_save(l->b, _k, k.k);
 		verify_modified_extent(iter, _k);
@@ -1298,7 +1245,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
 	case BCH_EXTENT_OVERLAP_BACK:
 		/* insert overlaps with end of k: */
-		bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
+		bch2_cut_back(bkey_start_pos(&insert->k), k.k);
 		BUG_ON(bkey_deleted(k.k));
 		extent_save(l->b, _k, k.k);
@@ -1318,7 +1265,8 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
 		if (!bkey_whiteout(k.k))
 			btree_account_key_drop(l->b, _k);
 
-		bch2_drop_subtract(s, k);
+		k.k->size = 0;
+		k.k->type = KEY_TYPE_DELETED;
 
 		if (_k >= btree_bset_last(l->b)->start) {
 			unsigned u64s = _k->u64s;
@@ -1358,14 +1306,11 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
 		bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
 		BUG_ON(bkey_deleted(&split.k.k));
 
-		bch2_cut_subtract_front(s, insert->k.p, k);
+		__bch2_cut_front(insert->k.p, k);
 		BUG_ON(bkey_deleted(k.k));
 		extent_save(l->b, _k, k.k);
 		verify_modified_extent(iter, _k);
 
-		bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
-				 bkey_start_offset(&split.k.k),
-				 split.k.k.size);
 		extent_bset_insert(c, iter, &split.k);
 		break;
 	}
@@ -1414,8 +1359,6 @@ static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
 	    !bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
 		if (!bkey_whiteout(k.k)) {
 			btree_account_key_drop(l->b, _k);
-			bch2_subtract_sectors(s, k.s_c,
-					      bkey_start_offset(k.k), k.k->size);
 			_k->type = KEY_TYPE_DISCARD;
 			reserve_whiteout(l->b, _k);
 		}
@@ -1505,7 +1448,6 @@ enum btree_insert_ret
 bch2_insert_fixup_extent(struct btree_insert *trans,
 			 struct btree_insert_entry *insert)
 {
-	struct bch_fs *c = trans->c;
 	struct btree_iter *iter = insert->iter;
 	struct btree *b = iter->l[0].b;
 	struct extent_insert_state s = {
@@ -1530,19 +1472,10 @@ bch2_insert_fixup_extent(struct btree_insert *trans,
 	 */
 	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
 
-	if (!s.deleting &&
-	    !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
-		bch2_add_sectors(&s, bkey_i_to_s_c(insert->k),
-				 bkey_start_offset(&insert->k->k),
-				 insert->k->k.size);
-
 	__bch2_insert_fixup_extent(&s);
 
 	extent_insert_committed(&s);
 
-	bch2_fs_usage_apply(c, &s.stats, trans->disk_res,
-			    gc_pos_btree_node(b));
-
 	EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
 	EBUG_ON(bkey_cmp(iter->pos, s.committed));
...
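
Net effect on extents.c, as a before/after sketch (names from the hunks
above):

	/*
	 * Before: every cut was paired with accounting, e.g.
	 *
	 *	bch2_cut_subtract_front(s, insert->k.p, k);
	 *		= bch2_subtract_sectors(...) + __bch2_cut_front(...)
	 *
	 * After: extent_squash() and friends only reshape keys,
	 *
	 *	__bch2_cut_front(insert->k.p, k);
	 *
	 * because bch2_mark_update() already accounted those sectors
	 * against the node before the modification began.
	 */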