Commit 4de77495 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Reorganize extents.c
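
This commit is mostly code motion within extents.c and extents.h: the generic
"keys with pointers" helpers are grouped together and several are renamed, with
callers in fs-io.c, io.c, move.c and recovery.c updated to match. In particular,
bch2_extent_is_compressed() becomes bch2_bkey_sectors_compressed(),
bch2_bkey_nr_dirty_ptrs() becomes bch2_bkey_nr_ptrs_allocated(), and the old
bch2_bkey_nr_ptrs_allocated() becomes bch2_bkey_nr_ptrs_fully_allocated().
A minimal, hypothetical caller using the new names (the key k, the reservation
and the surrounding context are illustrative only, not part of this commit):

    /* sketch: sizing a disk reservation for an extent key k (struct bkey_s_c) */
    unsigned nr_ptrs    = bch2_bkey_nr_ptrs_allocated(k);    /* was bch2_bkey_nr_dirty_ptrs() */
    unsigned compressed = bch2_bkey_sectors_compressed(k);   /* was bch2_extent_is_compressed() */

    if (compressed)
        ret = bch2_disk_reservation_get(c, &disk_res,
                                        k.k->size, nr_ptrs, 0);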

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 4be1a412
......@@ -200,7 +200,7 @@ bch2_extent_can_insert(struct btree_trans *trans,
*u64s += _k->u64s;
if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
(sectors = bch2_extent_is_compressed(k))) {
(sectors = bch2_bkey_sectors_compressed(k))) {
int flags = trans->flags & BTREE_INSERT_NOFAIL
? BCH_DISK_RESERVATION_NOFAIL : 0;
......
......@@ -24,81 +24,15 @@
#include "trace.h"
#include "util.h"
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
unsigned nr_ptrs = 0;
bkey_for_each_ptr(p, ptr)
nr_ptrs++;
return nr_ptrs;
}
unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
{
unsigned nr_ptrs = 0;
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v: {
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(p, ptr)
nr_ptrs += !ptr->cached;
BUG_ON(!nr_ptrs);
break;
}
case KEY_TYPE_reservation:
nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
break;
}
return nr_ptrs;
}
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
struct extent_ptr_decoded p)
{
unsigned durability = 0;
struct bch_dev *ca;
if (p.ptr.cached)
return 0;
ca = bch_dev_bkey_exists(c, p.ptr.dev);
if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
durability = max_t(unsigned, durability, ca->mi.durability);
if (p.has_ec) {
struct stripe *s =
genradix_ptr(&c->stripes[0], p.ec.idx);
if (WARN_ON(!s))
goto out;
durability = max_t(unsigned, durability, s->nr_redundant);
}
out:
return durability;
}
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
unsigned durability = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
durability += bch2_extent_ptr_durability(c, p);
static unsigned bch2_crc_field_size_max[] = {
[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};
return durability;
}
static void bch2_extent_crc_pack(union bch_extent_crc *,
struct bch_extent_crc_unpacked,
enum bch_extent_entry_type);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
unsigned dev)
......@@ -218,172 +152,299 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
return ret;
}
void bch2_bkey_append_ptr(struct bkey_i *k,
struct bch_extent_ptr ptr)
{
EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
switch (k->k.type) {
case KEY_TYPE_btree_ptr:
case KEY_TYPE_extent:
EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
memcpy((void *) &k->v + bkey_val_bytes(&k->k),
&ptr,
sizeof(ptr));
k->u64s++;
break;
default:
BUG();
}
}
/* KEY_TYPE_btree_ptr: */
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bch_extent_ptr *ptr;
if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
return "value too big";
bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
return bch2_bkey_ptrs_invalid(c, k);
}
const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
const char *err;
char buf[160];
struct bucket_mark mark;
struct bch_dev *ca;
bkey_for_each_ptr(ptrs, ptr)
if (ptr->dev == dev)
return ptr;
return NULL;
}
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
!bch2_bkey_replicas_marked(c, k, false), c,
"btree key bad (replicas not marked in superblock):\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
bkey_for_each_ptr(ptrs, ptr)
if (bch2_dev_in_target(c, ptr->dev, target) &&
(!ptr->cached ||
!ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
return true;
if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
return;
return false;
}
bkey_for_each_ptr(ptrs, ptr) {
ca = bch_dev_bkey_exists(c, ptr->dev);
/* extent specific utility code */
mark = ptr_bucket_mark(ca, ptr);
const struct bch_extent_ptr *
bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
{
const struct bch_extent_ptr *ptr;
err = "stale";
if (gen_after(mark.gen, ptr->gen))
goto err;
extent_for_each_ptr(e, ptr)
if (ptr->dev == dev)
return ptr;
err = "inconsistent";
if (mark.data_type != BCH_DATA_BTREE ||
mark.dirty_sectors < c->opts.btree_node_size)
goto err;
}
return NULL;
return;
err:
bch2_bkey_val_to_text(&PBUF(buf), c, k);
bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
err, buf, PTR_BUCKET_NR(ca, ptr),
mark.gen, (unsigned) mark.v.counter);
}
const struct bch_extent_ptr *
bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
const struct bch_extent_ptr *ptr;
extent_for_each_ptr(e, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
bch2_bkey_ptrs_to_text(out, c, k);
}
if (ca->mi.group &&
ca->mi.group - 1 == group)
return ptr;
}
/* KEY_TYPE_extent: */
return NULL;
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
return bch2_bkey_ptrs_invalid(c, k);
}
unsigned bch2_extent_is_compressed(struct bkey_s_c k)
void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
unsigned ret = 0;
char buf[160];
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (!p.ptr.cached &&
p.crc.compression_type != BCH_COMPRESSION_NONE)
ret += p.crc.compressed_size;
/*
* XXX: we should be doing most/all of these checks at startup time,
* where we check bch2_bkey_invalid() in btree_node_read_done()
*
* But note that we can't check for stale pointers or incorrect gc marks
* until after journal replay is done (it might be an extent that's
* going to get overwritten during replay)
*/
return ret;
}
if (percpu_down_read_trylock(&c->mark_lock)) {
bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
!bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
"extent key bad (replicas not marked in superblock):\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
percpu_up_read(&c->mark_lock);
}
/*
* If journal replay hasn't finished, we might be seeing keys
* that will be overwritten by the time journal replay is done:
*/
if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
return;
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
struct bch_extent_ptr m, u64 offset)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
extent_for_each_ptr_decode(e, p, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
unsigned stale = gen_after(mark.gen, p.ptr.gen);
unsigned disk_sectors = ptr_disk_sectors(p);
unsigned mark_sectors = p.ptr.cached
? mark.cached_sectors
: mark.dirty_sectors;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (p.ptr.dev == m.dev &&
p.ptr.gen == m.gen &&
(s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
(s64) m.offset - offset)
return true;
bch2_fs_bug_on(stale && !p.ptr.cached, c,
"stale dirty pointer (ptr gen %u bucket %u",
p.ptr.gen, mark.gen);
return false;
bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
bch2_fs_bug_on(!stale &&
(mark.data_type != BCH_DATA_USER ||
mark_sectors < disk_sectors), c,
"extent pointer not marked: %s:\n"
"type %u sectors %u < %u",
(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
mark.data_type,
mark_sectors, disk_sectors);
}
}
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
union bch_extent_entry *entry)
void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
union bch_extent_entry *i = ptrs.start;
if (i == entry)
return NULL;
while (extent_entry_next(i) != entry)
i = extent_entry_next(i);
return i;
bch2_bkey_ptrs_to_text(out, c, k);
}
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
struct bch_extent_ptr *ptr)
enum merge_result bch2_extent_merge(struct bch_fs *c,
struct bkey_s _l, struct bkey_s _r)
{
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *dst, *src, *prev;
bool drop_crc = true;
EBUG_ON(ptr < &ptrs.start->ptr ||
ptr >= &ptrs.end->ptr);
EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
struct bkey_s_extent l = bkey_s_to_extent(_l);
struct bkey_s_extent r = bkey_s_to_extent(_r);
union bch_extent_entry *en_l = l.v->start;
union bch_extent_entry *en_r = r.v->start;
struct bch_extent_crc_unpacked crc_l, crc_r;
src = extent_entry_next(to_entry(ptr));
if (src != ptrs.end &&
!extent_entry_is_crc(src))
drop_crc = false;
if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
return BCH_MERGE_NOMERGE;
dst = to_entry(ptr);
while ((prev = extent_entry_prev(ptrs, dst))) {
if (extent_entry_is_ptr(prev))
crc_l = bch2_extent_crc_unpack(l.k, NULL);
extent_for_each_entry(l, en_l) {
en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
if (extent_entry_type(en_l) != extent_entry_type(en_r))
return BCH_MERGE_NOMERGE;
switch (extent_entry_type(en_l)) {
case BCH_EXTENT_ENTRY_ptr: {
const struct bch_extent_ptr *lp = &en_l->ptr;
const struct bch_extent_ptr *rp = &en_r->ptr;
struct bch_dev *ca;
if (lp->offset + crc_l.compressed_size != rp->offset ||
lp->dev != rp->dev ||
lp->gen != rp->gen)
return BCH_MERGE_NOMERGE;
/* We don't allow extents to straddle buckets: */
ca = bch_dev_bkey_exists(c, lp->dev);
if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
return BCH_MERGE_NOMERGE;
break;
}
case BCH_EXTENT_ENTRY_stripe_ptr:
if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
return BCH_MERGE_NOMERGE;
break;
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
if (crc_l.csum_type != crc_r.csum_type ||
crc_l.compression_type != crc_r.compression_type ||
crc_l.nonce != crc_r.nonce)
return BCH_MERGE_NOMERGE;
if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
crc_r.offset)
return BCH_MERGE_NOMERGE;
if (!bch2_checksum_mergeable(crc_l.csum_type))
return BCH_MERGE_NOMERGE;
if (crc_l.compression_type)
return BCH_MERGE_NOMERGE;
if (crc_l.csum_type &&
crc_l.uncompressed_size +
crc_r.uncompressed_size > c->sb.encoded_extent_max)
return BCH_MERGE_NOMERGE;
if (crc_l.uncompressed_size + crc_r.uncompressed_size - 1 >
bch2_crc_field_size_max[extent_entry_type(en_l)])
return BCH_MERGE_NOMERGE;
if (extent_entry_is_crc(prev)) {
if (drop_crc)
dst = prev;
break;
default:
return BCH_MERGE_NOMERGE;
}
}
dst = prev;
extent_for_each_entry(l, en_l) {
struct bch_extent_crc_unpacked crc_l, crc_r;
en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
if (!extent_entry_is_crc(en_l))
continue;
crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
crc_l.csum,
crc_r.csum,
crc_r.uncompressed_size << 9);
crc_l.uncompressed_size += crc_r.uncompressed_size;
crc_l.compressed_size += crc_r.compressed_size;
bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
extent_entry_type(en_l));
}
memmove_u64s_down(dst, src,
(u64 *) ptrs.end - (u64 *) src);
k.k->u64s -= (u64 *) src - (u64 *) dst;
bch2_key_resize(l.k, l.k->size + r.k->size);
return dst;
return BCH_MERGE_MERGE;
}
/* KEY_TYPE_reservation: */
const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
return "incorrect value size";
if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
return "invalid nr_replicas";
return NULL;
}
void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
pr_buf(out, "generation %u replicas %u",
le32_to_cpu(r.v->generation),
r.v->nr_replicas);
}
enum merge_result bch2_reservation_merge(struct bch_fs *c,
struct bkey_s _l, struct bkey_s _r)
{
struct bkey_s_reservation l = bkey_s_to_reservation(_l);
struct bkey_s_reservation r = bkey_s_to_reservation(_r);
if (l.v->generation != r.v->generation ||
l.v->nr_replicas != r.v->nr_replicas)
return BCH_MERGE_NOMERGE;
if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
bch2_key_resize(l.k, KEY_SIZE_MAX);
bch2_cut_front_s(l.k->p, r.s);
return BCH_MERGE_PARTIAL;
}
bch2_key_resize(l.k, l.k->size + r.k->size);
return BCH_MERGE_MERGE;
}
/* Extent checksum entries: */
/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
struct bch_extent_crc_unpacked r)
{
return (l.csum_type != r.csum_type ||
l.compression_type != r.compression_type ||
l.compressed_size != r.compressed_size ||
l.uncompressed_size != r.uncompressed_size ||
l.offset != r.offset ||
l.live_size != r.live_size ||
l.nonce != r.nonce ||
bch2_crc_cmp(l.csum, r.csum));
}
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
......@@ -462,509 +523,237 @@ bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
return ret;
}
/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
struct bch_extent_crc_unpacked r)
{
return (l.csum_type != r.csum_type ||
l.compression_type != r.compression_type ||
l.compressed_size != r.compressed_size ||
l.uncompressed_size != r.uncompressed_size ||
l.offset != r.offset ||
l.live_size != r.live_size ||
l.nonce != r.nonce ||
bch2_crc_cmp(l.csum, r.csum));
}
void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
struct bch_extent_crc_unpacked src,
enum bch_extent_entry_type type)
{
union bch_extent_entry *entry;
u64 *d = (u64 *) bkeyp_val(f, k);
unsigned i;
for (i = 0; i < bkeyp_val_u64s(f, k); i++)
d[i] = swab64(d[i]);
#define set_common_fields(_dst, _src) \
_dst.type = 1 << type; \
_dst.csum_type = _src.csum_type, \
_dst.compression_type = _src.compression_type, \
_dst._compressed_size = _src.compressed_size - 1, \
_dst._uncompressed_size = _src.uncompressed_size - 1, \
_dst.offset = _src.offset
for (entry = (union bch_extent_entry *) d;
entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
entry = extent_entry_next(entry)) {
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
break;
switch (type) {
case BCH_EXTENT_ENTRY_crc32:
entry->crc32.csum = swab32(entry->crc32.csum);
set_common_fields(dst->crc32, src);
dst->crc32.csum = *((__le32 *) &src.csum.lo);
break;
case BCH_EXTENT_ENTRY_crc64:
entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
set_common_fields(dst->crc64, src);
dst->crc64.nonce = src.nonce;
dst->crc64.csum_lo = src.csum.lo;
dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
break;
case BCH_EXTENT_ENTRY_crc128:
entry->crc128.csum.hi = (__force __le64)
swab64((__force u64) entry->crc128.csum.hi);
entry->crc128.csum.lo = (__force __le64)
swab64((__force u64) entry->crc128.csum.lo);
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
set_common_fields(dst->crc128, src);
dst->crc128.nonce = src.nonce;
dst->crc128.csum = src.csum;
break;
default:
BUG();
}
}
#undef set_common_fields
}
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
void bch2_extent_crc_append(struct bkey_i *k,
struct bch_extent_crc_unpacked new)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct bch_extent_crc_unpacked crc;
const struct bch_extent_ptr *ptr;
const struct bch_extent_stripe_ptr *ec;
struct bch_dev *ca;
bool first = true;
struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
union bch_extent_crc *crc = (void *) ptrs.end;
enum bch_extent_entry_type type;
bkey_extent_entry_for_each(ptrs, entry) {
if (!first)
pr_buf(out, " ");
if (bch_crc_bytes[new.csum_type] <= 4 &&
new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
new.nonce <= CRC32_NONCE_MAX)
type = BCH_EXTENT_ENTRY_crc32;
else if (bch_crc_bytes[new.csum_type] <= 10 &&
new.uncompressed_size - 1 <= CRC64_SIZE_MAX &&
new.nonce <= CRC64_NONCE_MAX)
type = BCH_EXTENT_ENTRY_crc64;
else if (bch_crc_bytes[new.csum_type] <= 16 &&
new.uncompressed_size - 1 <= CRC128_SIZE_MAX &&
new.nonce <= CRC128_NONCE_MAX)
type = BCH_EXTENT_ENTRY_crc128;
else
BUG();
switch (__extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
ptr = entry_to_ptr(entry);
ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
? bch_dev_bkey_exists(c, ptr->dev)
: NULL;
bch2_extent_crc_pack(crc, new, type);
pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
(u64) ptr->offset, ptr->gen,
ptr->cached ? " cached" : "",
ca && ptr_stale(ca, ptr)
? " stale" : "");
break;
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
k->k.u64s += extent_entry_u64s(ptrs.end);
pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
crc.compressed_size,
crc.uncompressed_size,
crc.offset, crc.nonce,
crc.csum_type,
crc.compression_type);
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
ec = &entry->stripe_ptr;
EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
pr_buf(out, "ec: idx %llu block %u",
(u64) ec->idx, ec->block);
break;
default:
pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
return;
}
/* Generic code for keys with pointers: */
first = false;
}
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
return bch2_bkey_devs(k).nr;
}
static const char *extent_ptr_invalid(const struct bch_fs *c,
struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
unsigned size_ondisk,
bool metadata)
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr2;
struct bch_dev *ca;
if (!bch2_dev_exists2(c, ptr->dev))
return "pointer to invalid device";
ca = bch_dev_bkey_exists(c, ptr->dev);
if (!ca)
return "pointer to invalid device";
bkey_for_each_ptr(ptrs, ptr2)
if (ptr != ptr2 && ptr->dev == ptr2->dev)
return "multiple pointers to same device";
if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
return "offset past end of device";
if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
return "offset before first bucket";
if (bucket_remainder(ca, ptr->offset) +
size_ondisk > ca->mi.bucket_size)
return "spans multiple buckets";
return NULL;
return k.k->type == KEY_TYPE_reservation
? bkey_s_c_to_reservation(k).v->nr_replicas
: bch2_bkey_dirty_devs(k).nr;
}
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
unsigned ret = 0;
if (k.k->type == KEY_TYPE_reservation) {
ret = bkey_s_c_to_reservation(k).v->nr_replicas;
} else {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct bch_extent_crc_unpacked crc;
unsigned size_ondisk = k.k->size;
const char *reason;
unsigned nonce = UINT_MAX;
if (k.k->type == KEY_TYPE_btree_ptr)
size_ondisk = c->opts.btree_node_size;
bkey_extent_entry_for_each(ptrs, entry) {
if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
return "invalid extent entry type";
if (k.k->type == KEY_TYPE_btree_ptr &&
!extent_entry_is_ptr(entry))
return "has non ptr field";
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
reason = extent_ptr_invalid(c, k, &entry->ptr,
size_ondisk, false);
if (reason)
return reason;
break;
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
if (crc.offset + crc.live_size >
crc.uncompressed_size)
return "checksum offset + key size > uncompressed size";
size_ondisk = crc.compressed_size;
if (!bch2_checksum_type_valid(c, crc.csum_type))
return "invalid checksum type";
if (crc.compression_type >= BCH_COMPRESSION_NR)
return "invalid compression type";
struct extent_ptr_decoded p;
if (bch2_csum_type_is_encryption(crc.csum_type)) {
if (nonce == UINT_MAX)
nonce = crc.offset + crc.nonce;
else if (nonce != crc.offset + crc.nonce)
return "incorrect nonce";
}
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
break;
}
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
ret += !p.ptr.cached &&
p.crc.compression_type == BCH_COMPRESSION_NONE;
}
return NULL;
}
/* Btree ptrs */
const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
return "value too big";
return bch2_bkey_ptrs_invalid(c, k);
return ret;
}
void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
const char *err;
char buf[160];
struct bucket_mark mark;
struct bch_dev *ca;
bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
!bch2_bkey_replicas_marked(c, k, false), c,
"btree key bad (replicas not marked in superblock):\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
return;
bkey_for_each_ptr(ptrs, ptr) {
ca = bch_dev_bkey_exists(c, ptr->dev);
mark = ptr_bucket_mark(ca, ptr);
err = "stale";
if (gen_after(mark.gen, ptr->gen))
goto err;
err = "inconsistent";
if (mark.data_type != BCH_DATA_BTREE ||
mark.dirty_sectors < c->opts.btree_node_size)
goto err;
}
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
unsigned ret = 0;
return;
err:
bch2_bkey_val_to_text(&PBUF(buf), c, k);
bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
err, buf, PTR_BUCKET_NR(ca, ptr),
mark.gen, (unsigned) mark.v.counter);
}
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (!p.ptr.cached &&
p.crc.compression_type != BCH_COMPRESSION_NONE)
ret += p.crc.compressed_size;
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
bch2_bkey_ptrs_to_text(out, c, k);
return ret;
}
/* Extents */
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
unsigned nr_replicas)
{
unsigned new_val_u64s = bkey_val_u64s(k.k);
int val_u64s_delta;
u64 sub;
if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
return 0;
EBUG_ON(bkey_cmp(where, k.k->p) > 0);
sub = where.offset - bkey_start_offset(k.k);
k.k->size -= sub;
if (!k.k->size) {
k.k->type = KEY_TYPE_deleted;
new_val_u64s = 0;
}
switch (k.k->type) {
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v: {
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *entry;
bool seen_crc = false;
bkey_extent_entry_for_each(ptrs, entry) {
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
if (!seen_crc)
entry->ptr.offset += sub;
break;
case BCH_EXTENT_ENTRY_crc32:
entry->crc32.offset += sub;
break;
case BCH_EXTENT_ENTRY_crc64:
entry->crc64.offset += sub;
break;
case BCH_EXTENT_ENTRY_crc128:
entry->crc128.offset += sub;
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
break;
}
struct btree_trans trans;
struct btree_iter *iter;
struct bpos end = pos;
struct bkey_s_c k;
bool ret = true;
int err;
if (extent_entry_is_crc(entry))
seen_crc = true;
}
end.offset += size;
break;
}
case KEY_TYPE_reflink_p: {
struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
bch2_trans_init(&trans, c, 0, 0);
le64_add_cpu(&p.v->idx, sub);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
BTREE_ITER_SLOTS, k, err) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
}
case KEY_TYPE_inline_data: {
struct bkey_s_inline_data d = bkey_s_to_inline_data(k);
sub = min_t(u64, sub << 9, bkey_val_bytes(d.k));
memmove(d.v->data,
d.v->data + sub,
bkey_val_bytes(d.k) - sub);
new_val_u64s -= sub >> 3;
if (nr_replicas > bch2_bkey_nr_ptrs_fully_allocated(k)) {
ret = false;
break;
}
}
bch2_trans_exit(&trans);
val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
BUG_ON(val_u64s_delta < 0);
set_bkey_val_u64s(k.k, new_val_u64s);
memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
return -val_u64s_delta;
return ret;
}
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
struct extent_ptr_decoded p)
{
unsigned new_val_u64s = bkey_val_u64s(k.k);
int val_u64s_delta;
u64 len = 0;
unsigned durability = 0;
struct bch_dev *ca;
if (bkey_cmp(where, k.k->p) >= 0)
if (p.ptr.cached)
return 0;
EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
ca = bch_dev_bkey_exists(c, p.ptr.dev);
len = where.offset - bkey_start_offset(k.k);
if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
durability = max_t(unsigned, durability, ca->mi.durability);
k.k->p = where;
k.k->size = len;
if (p.has_ec) {
struct stripe *s =
genradix_ptr(&c->stripes[0], p.ec.idx);
if (!len) {
k.k->type = KEY_TYPE_deleted;
new_val_u64s = 0;
}
if (WARN_ON(!s))
goto out;
switch (k.k->type) {
case KEY_TYPE_inline_data:
new_val_u64s = min(new_val_u64s, k.k->size << 6);
break;
durability = max_t(unsigned, durability, s->nr_redundant);
}
val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
BUG_ON(val_u64s_delta < 0);
set_bkey_val_u64s(k.k, new_val_u64s);
memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
return -val_u64s_delta;
}
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
return bch2_bkey_ptrs_invalid(c, k);
out:
return durability;
}
void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
char buf[160];
/*
* XXX: we should be doing most/all of these checks at startup time,
* where we check bch2_bkey_invalid() in btree_node_read_done()
*
* But note that we can't check for stale pointers or incorrect gc marks
* until after journal replay is done (it might be an extent that's
* going to get overwritten during replay)
*/
if (percpu_down_read_trylock(&c->mark_lock)) {
bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
!bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
"extent key bad (replicas not marked in superblock):\n%s",
(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
percpu_up_read(&c->mark_lock);
}
/*
* If journal replay hasn't finished, we might be seeing keys
* that will be overwritten by the time journal replay is done:
*/
if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
return;
extent_for_each_ptr_decode(e, p, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
unsigned stale = gen_after(mark.gen, p.ptr.gen);
unsigned disk_sectors = ptr_disk_sectors(p);
unsigned mark_sectors = p.ptr.cached
? mark.cached_sectors
: mark.dirty_sectors;
bch2_fs_bug_on(stale && !p.ptr.cached, c,
"stale dirty pointer (ptr gen %u bucket %u",
p.ptr.gen, mark.gen);
unsigned durability = 0;
bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
durability += bch2_extent_ptr_durability(c, p);
bch2_fs_bug_on(!stale &&
(mark.data_type != BCH_DATA_USER ||
mark_sectors < disk_sectors), c,
"extent pointer not marked: %s:\n"
"type %u sectors %u < %u",
(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
mark.data_type,
mark_sectors, disk_sectors);
}
return durability;
}
void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
unsigned target,
unsigned nr_desired_replicas)
{
bch2_bkey_ptrs_to_text(out, c, k);
}
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *entry;
struct extent_ptr_decoded p;
int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;
static unsigned bch2_crc_field_size_max[] = {
[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};
if (target && extra > 0)
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
int n = bch2_extent_ptr_durability(c, p);
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
struct bch_extent_crc_unpacked src,
enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src) \
_dst.type = 1 << type; \
_dst.csum_type = _src.csum_type, \
_dst.compression_type = _src.compression_type, \
_dst._compressed_size = _src.compressed_size - 1, \
_dst._uncompressed_size = _src.uncompressed_size - 1, \
_dst.offset = _src.offset
if (n && n <= extra &&
!bch2_dev_in_target(c, p.ptr.dev, target)) {
entry->ptr.cached = true;
extra -= n;
}
}
switch (type) {
case BCH_EXTENT_ENTRY_crc32:
set_common_fields(dst->crc32, src);
dst->crc32.csum = *((__le32 *) &src.csum.lo);
break;
case BCH_EXTENT_ENTRY_crc64:
set_common_fields(dst->crc64, src);
dst->crc64.nonce = src.nonce;
dst->crc64.csum_lo = src.csum.lo;
dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
break;
case BCH_EXTENT_ENTRY_crc128:
set_common_fields(dst->crc128, src);
dst->crc128.nonce = src.nonce;
dst->crc128.csum = src.csum;
break;
default:
BUG();
if (extra > 0)
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
int n = bch2_extent_ptr_durability(c, p);
if (n && n <= extra) {
entry->ptr.cached = true;
extra -= n;
}
}
#undef set_common_fields
}
void bch2_extent_crc_append(struct bkey_i *k,
struct bch_extent_crc_unpacked new)
{
struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
union bch_extent_crc *crc = (void *) ptrs.end;
enum bch_extent_entry_type type;
if (bch_crc_bytes[new.csum_type] <= 4 &&
new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
new.nonce <= CRC32_NONCE_MAX)
type = BCH_EXTENT_ENTRY_crc32;
else if (bch_crc_bytes[new.csum_type] <= 10 &&
new.uncompressed_size - 1 <= CRC64_SIZE_MAX &&
new.nonce <= CRC64_NONCE_MAX)
type = BCH_EXTENT_ENTRY_crc64;
else if (bch_crc_bytes[new.csum_type] <= 16 &&
new.uncompressed_size - 1 <= CRC128_SIZE_MAX &&
new.nonce <= CRC128_NONCE_MAX)
type = BCH_EXTENT_ENTRY_crc128;
else
BUG();
void bch2_bkey_append_ptr(struct bkey_i *k,
struct bch_extent_ptr ptr)
{
EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
bch2_extent_crc_pack(crc, new, type);
switch (k->k.type) {
case KEY_TYPE_btree_ptr:
case KEY_TYPE_extent:
EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
k->k.u64s += extent_entry_u64s(ptrs.end);
ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
memcpy((void *) &k->v + bkey_val_bytes(&k->k),
&ptr,
sizeof(ptr));
k->u64s++;
break;
default:
BUG();
}
}
static inline void __extent_entry_insert(struct bkey_i *k,
......@@ -1010,6 +799,107 @@ void bch2_extent_ptr_decoded_append(struct bkey_i *k,
}
}
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
union bch_extent_entry *entry)
{
union bch_extent_entry *i = ptrs.start;
if (i == entry)
return NULL;
while (extent_entry_next(i) != entry)
i = extent_entry_next(i);
return i;
}
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
struct bch_extent_ptr *ptr)
{
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *dst, *src, *prev;
bool drop_crc = true;
EBUG_ON(ptr < &ptrs.start->ptr ||
ptr >= &ptrs.end->ptr);
EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
src = extent_entry_next(to_entry(ptr));
if (src != ptrs.end &&
!extent_entry_is_crc(src))
drop_crc = false;
dst = to_entry(ptr);
while ((prev = extent_entry_prev(ptrs, dst))) {
if (extent_entry_is_ptr(prev))
break;
if (extent_entry_is_crc(prev)) {
if (drop_crc)
dst = prev;
break;
}
dst = prev;
}
memmove_u64s_down(dst, src,
(u64 *) ptrs.end - (u64 *) src);
k.k->u64s -= (u64 *) src - (u64 *) dst;
return dst;
}
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
struct bch_extent_ptr *ptr;
bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}
const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr)
if (ptr->dev == dev)
return ptr;
return NULL;
}
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr)
if (bch2_dev_in_target(c, ptr->dev, target) &&
(!ptr->cached ||
!ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
return true;
return false;
}
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
struct bch_extent_ptr m, u64 offset)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (p.ptr.dev == m.dev &&
p.ptr.gen == m.gen &&
(s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
(s64) m.offset - offset)
return true;
return false;
}
/*
* bch_extent_normalize - clean up an extent, dropping stale pointers etc.
*
......@@ -1027,245 +917,307 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
/* will only happen if all pointers were cached: */
if (!bkey_val_u64s(k.k))
if (!bch2_bkey_nr_ptrs(k.s_c))
k.k->type = KEY_TYPE_discard;
return bkey_whiteout(k.k);
}
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
unsigned target,
unsigned nr_desired_replicas)
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *entry;
struct extent_ptr_decoded p;
int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct bch_extent_crc_unpacked crc;
const struct bch_extent_ptr *ptr;
const struct bch_extent_stripe_ptr *ec;
struct bch_dev *ca;
bool first = true;
if (target && extra > 0)
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
int n = bch2_extent_ptr_durability(c, p);
bkey_extent_entry_for_each(ptrs, entry) {
if (!first)
pr_buf(out, " ");
if (n && n <= extra &&
!bch2_dev_in_target(c, p.ptr.dev, target)) {
entry->ptr.cached = true;
extra -= n;
}
}
switch (__extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
ptr = entry_to_ptr(entry);
ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
? bch_dev_bkey_exists(c, ptr->dev)
: NULL;
if (extra > 0)
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
int n = bch2_extent_ptr_durability(c, p);
pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
(u64) ptr->offset, ptr->gen,
ptr->cached ? " cached" : "",
ca && ptr_stale(ca, ptr)
? " stale" : "");
break;
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
if (n && n <= extra) {
entry->ptr.cached = true;
extra -= n;
pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
crc.compressed_size,
crc.uncompressed_size,
crc.offset, crc.nonce,
crc.csum_type,
crc.compression_type);
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
ec = &entry->stripe_ptr;
pr_buf(out, "ec: idx %llu block %u",
(u64) ec->idx, ec->block);
break;
default:
pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
return;
}
first = false;
}
}
enum merge_result bch2_extent_merge(struct bch_fs *c,
struct bkey_s _l, struct bkey_s _r)
static const char *extent_ptr_invalid(const struct bch_fs *c,
struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
unsigned size_ondisk,
bool metadata)
{
struct bkey_s_extent l = bkey_s_to_extent(_l);
struct bkey_s_extent r = bkey_s_to_extent(_r);
union bch_extent_entry *en_l = l.v->start;
union bch_extent_entry *en_r = r.v->start;
struct bch_extent_crc_unpacked crc_l, crc_r;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr2;
struct bch_dev *ca;
if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
return BCH_MERGE_NOMERGE;
if (!bch2_dev_exists2(c, ptr->dev))
return "pointer to invalid device";
crc_l = bch2_extent_crc_unpack(l.k, NULL);
ca = bch_dev_bkey_exists(c, ptr->dev);
if (!ca)
return "pointer to invalid device";
extent_for_each_entry(l, en_l) {
en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
bkey_for_each_ptr(ptrs, ptr2)
if (ptr != ptr2 && ptr->dev == ptr2->dev)
return "multiple pointers to same device";
if (extent_entry_type(en_l) != extent_entry_type(en_r))
return BCH_MERGE_NOMERGE;
if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
return "offset past end of device";
switch (extent_entry_type(en_l)) {
case BCH_EXTENT_ENTRY_ptr: {
const struct bch_extent_ptr *lp = &en_l->ptr;
const struct bch_extent_ptr *rp = &en_r->ptr;
struct bch_dev *ca;
if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
return "offset before first bucket";
if (lp->offset + crc_l.compressed_size != rp->offset ||
lp->dev != rp->dev ||
lp->gen != rp->gen)
return BCH_MERGE_NOMERGE;
if (bucket_remainder(ca, ptr->offset) +
size_ondisk > ca->mi.bucket_size)
return "spans multiple buckets";
/* We don't allow extents to straddle buckets: */
ca = bch_dev_bkey_exists(c, lp->dev);
return NULL;
}
if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
return BCH_MERGE_NOMERGE;
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct bch_extent_crc_unpacked crc;
unsigned size_ondisk = k.k->size;
const char *reason;
unsigned nonce = UINT_MAX;
if (k.k->type == KEY_TYPE_btree_ptr)
size_ondisk = c->opts.btree_node_size;
bkey_extent_entry_for_each(ptrs, entry) {
if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
return "invalid extent entry type";
break;
}
case BCH_EXTENT_ENTRY_stripe_ptr:
if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
return BCH_MERGE_NOMERGE;
if (k.k->type == KEY_TYPE_btree_ptr &&
!extent_entry_is_ptr(entry))
return "has non ptr field";
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
reason = extent_ptr_invalid(c, k, &entry->ptr,
size_ondisk, false);
if (reason)
return reason;
break;
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
if (crc_l.csum_type != crc_r.csum_type ||
crc_l.compression_type != crc_r.compression_type ||
crc_l.nonce != crc_r.nonce)
return BCH_MERGE_NOMERGE;
if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
crc_r.offset)
return BCH_MERGE_NOMERGE;
crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
if (!bch2_checksum_mergeable(crc_l.csum_type))
return BCH_MERGE_NOMERGE;
if (crc.offset + crc.live_size >
crc.uncompressed_size)
return "checksum offset + key size > uncompressed size";
if (crc_l.compression_type)
return BCH_MERGE_NOMERGE;
size_ondisk = crc.compressed_size;
if (crc_l.csum_type &&
crc_l.uncompressed_size +
crc_r.uncompressed_size > c->sb.encoded_extent_max)
return BCH_MERGE_NOMERGE;
if (!bch2_checksum_type_valid(c, crc.csum_type))
return "invalid checksum type";
if (crc_l.uncompressed_size + crc_r.uncompressed_size - 1 >
bch2_crc_field_size_max[extent_entry_type(en_l)])
return BCH_MERGE_NOMERGE;
if (crc.compression_type >= BCH_COMPRESSION_NR)
return "invalid compression type";
if (bch2_csum_type_is_encryption(crc.csum_type)) {
if (nonce == UINT_MAX)
nonce = crc.offset + crc.nonce;
else if (nonce != crc.offset + crc.nonce)
return "incorrect nonce";
}
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
break;
default:
return BCH_MERGE_NOMERGE;
}
}
extent_for_each_entry(l, en_l) {
struct bch_extent_crc_unpacked crc_l, crc_r;
return NULL;
}
en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
{
union bch_extent_entry *entry;
u64 *d = (u64 *) bkeyp_val(f, k);
unsigned i;
if (!extent_entry_is_crc(en_l))
continue;
for (i = 0; i < bkeyp_val_u64s(f, k); i++)
d[i] = swab64(d[i]);
crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
for (entry = (union bch_extent_entry *) d;
entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
entry = extent_entry_next(entry)) {
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
break;
case BCH_EXTENT_ENTRY_crc32:
entry->crc32.csum = swab32(entry->crc32.csum);
break;
case BCH_EXTENT_ENTRY_crc64:
entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
break;
case BCH_EXTENT_ENTRY_crc128:
entry->crc128.csum.hi = (__force __le64)
swab64((__force u64) entry->crc128.csum.hi);
entry->crc128.csum.lo = (__force __le64)
swab64((__force u64) entry->crc128.csum.lo);
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
break;
}
}
}
crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
crc_l.csum,
crc_r.csum,
crc_r.uncompressed_size << 9);
/* Generic extent code: */
crc_l.uncompressed_size += crc_r.uncompressed_size;
crc_l.compressed_size += crc_r.compressed_size;
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
unsigned new_val_u64s = bkey_val_u64s(k.k);
int val_u64s_delta;
u64 sub;
bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
extent_entry_type(en_l));
}
if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
return 0;
bch2_key_resize(l.k, l.k->size + r.k->size);
EBUG_ON(bkey_cmp(where, k.k->p) > 0);
return BCH_MERGE_MERGE;
}
sub = where.offset - bkey_start_offset(k.k);
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
unsigned nr_replicas)
{
struct btree_trans trans;
struct btree_iter *iter;
struct bpos end = pos;
struct bkey_s_c k;
bool ret = true;
int err;
k.k->size -= sub;
end.offset += size;
if (!k.k->size) {
k.k->type = KEY_TYPE_deleted;
new_val_u64s = 0;
}
bch2_trans_init(&trans, c, 0, 0);
switch (k.k->type) {
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v: {
struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
union bch_extent_entry *entry;
bool seen_crc = false;
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
BTREE_ITER_SLOTS, k, err) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
bkey_extent_entry_for_each(ptrs, entry) {
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
if (!seen_crc)
entry->ptr.offset += sub;
break;
if (nr_replicas > bch2_bkey_nr_ptrs_allocated(k)) {
ret = false;
case BCH_EXTENT_ENTRY_crc32:
entry->crc32.offset += sub;
break;
case BCH_EXTENT_ENTRY_crc64:
entry->crc64.offset += sub;
break;
case BCH_EXTENT_ENTRY_crc128:
entry->crc128.offset += sub;
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
break;
}
}
bch2_trans_exit(&trans);
return ret;
}
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
unsigned ret = 0;
switch (k.k->type) {
case KEY_TYPE_extent: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
if (extent_entry_is_crc(entry))
seen_crc = true;
}
extent_for_each_ptr_decode(e, p, entry)
ret += !p.ptr.cached &&
p.crc.compression_type == BCH_COMPRESSION_NONE;
break;
}
case KEY_TYPE_reservation:
ret = bkey_s_c_to_reservation(k).v->nr_replicas;
case KEY_TYPE_reflink_p: {
struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
le64_add_cpu(&p.v->idx, sub);
break;
}
case KEY_TYPE_inline_data: {
struct bkey_s_inline_data d = bkey_s_to_inline_data(k);
return ret;
}
/* KEY_TYPE_reservation: */
sub = min_t(u64, sub << 9, bkey_val_bytes(d.k));
const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
memmove(d.v->data,
d.v->data + sub,
bkey_val_bytes(d.k) - sub);
if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
return "incorrect value size";
new_val_u64s -= sub >> 3;
break;
}
}
if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
return "invalid nr_replicas";
val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
BUG_ON(val_u64s_delta < 0);
return NULL;
set_bkey_val_u64s(k.k, new_val_u64s);
memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
return -val_u64s_delta;
}
void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
unsigned new_val_u64s = bkey_val_u64s(k.k);
int val_u64s_delta;
u64 len = 0;
pr_buf(out, "generation %u replicas %u",
le32_to_cpu(r.v->generation),
r.v->nr_replicas);
}
if (bkey_cmp(where, k.k->p) >= 0)
return 0;
enum merge_result bch2_reservation_merge(struct bch_fs *c,
struct bkey_s _l, struct bkey_s _r)
{
struct bkey_s_reservation l = bkey_s_to_reservation(_l);
struct bkey_s_reservation r = bkey_s_to_reservation(_r);
EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
if (l.v->generation != r.v->generation ||
l.v->nr_replicas != r.v->nr_replicas)
return BCH_MERGE_NOMERGE;
len = where.offset - bkey_start_offset(k.k);
if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
bch2_key_resize(l.k, KEY_SIZE_MAX);
bch2_cut_front_s(l.k->p, r.s);
return BCH_MERGE_PARTIAL;
k.k->p = where;
k.k->size = len;
if (!len) {
k.k->type = KEY_TYPE_deleted;
new_val_u64s = 0;
}
bch2_key_resize(l.k, l.k->size + r.k->size);
switch (k.k->type) {
case KEY_TYPE_inline_data:
new_val_u64s = min(new_val_u64s, k.k->size << 6);
break;
}
return BCH_MERGE_MERGE;
val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
BUG_ON(val_u64s_delta < 0);
set_bkey_val_u64s(k.k, new_val_u64s);
memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
return -val_u64s_delta;
}
......@@ -40,6 +40,9 @@ struct btree_insert_entry;
(union bch_extent_entry *) (_entry)); \
})
#define extent_entry_next(_entry) \
((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))
static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
......@@ -185,10 +188,52 @@ struct bkey_ptrs {
union bch_extent_entry *end;
};
/* iterate over bkey ptrs */
static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
switch (k.k->type) {
case KEY_TYPE_btree_ptr: {
struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
return (struct bkey_ptrs_c) {
to_entry(&e.v->start[0]),
to_entry(extent_entry_last(e))
};
}
case KEY_TYPE_extent: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
return (struct bkey_ptrs_c) {
e.v->start,
extent_entry_last(e)
};
}
case KEY_TYPE_stripe: {
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
return (struct bkey_ptrs_c) {
to_entry(&s.v->ptrs[0]),
to_entry(&s.v->ptrs[s.v->nr_blocks]),
};
}
case KEY_TYPE_reflink_v: {
struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
#define extent_entry_next(_entry) \
((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))
return (struct bkey_ptrs_c) {
r.v->start,
bkey_val_end(r),
};
}
default:
return (struct bkey_ptrs_c) { NULL, NULL };
}
}
static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);
return (struct bkey_ptrs) {
(void *) p.start,
(void *) p.end
};
}
#define __bkey_extent_entry_for_each_from(_start, _end, _entry) \
for ((_entry) = (_start); \
......@@ -281,96 +326,26 @@ out: \
#define bkey_for_each_crc(_k, _p, _crc, _iter) \
__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)
/* utility code common to all keys with pointers: */
static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
switch (k.k->type) {
case KEY_TYPE_btree_ptr: {
struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
return (struct bkey_ptrs_c) {
to_entry(&e.v->start[0]),
to_entry(extent_entry_last(e))
};
}
case KEY_TYPE_extent: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
return (struct bkey_ptrs_c) {
e.v->start,
extent_entry_last(e)
};
}
case KEY_TYPE_stripe: {
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
return (struct bkey_ptrs_c) {
to_entry(&s.v->ptrs[0]),
to_entry(&s.v->ptrs[s.v->nr_blocks]),
};
}
case KEY_TYPE_reflink_v: {
struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
return (struct bkey_ptrs_c) {
r.v->start,
bkey_val_end(r),
};
}
default:
return (struct bkey_ptrs_c) { NULL, NULL };
}
}
static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);
/* Iterate over pointers in KEY_TYPE_extent: */
return (struct bkey_ptrs) {
(void *) p.start,
(void *) p.end
};
}
static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(p, ptr)
ret.devs[ret.nr++] = ptr->dev;
return ret;
}
static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(p, ptr)
if (!ptr->cached)
ret.devs[ret.nr++] = ptr->dev;
#define extent_for_each_entry_from(_e, _entry, _start) \
__bkey_extent_entry_for_each_from(_start, \
extent_entry_last(_e),_entry)
return ret;
}
#define extent_for_each_entry(_e, _entry) \
extent_for_each_entry_from(_e, _entry, (_e).v->start)
static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
#define extent_ptr_next(_e, _ptr) \
__bkey_ptr_next(_ptr, extent_entry_last(_e))
bkey_for_each_ptr(p, ptr)
if (ptr->cached)
ret.devs[ret.nr++] = ptr->dev;
#define extent_for_each_ptr(_e, _ptr) \
__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)
return ret;
}
#define extent_for_each_ptr_decode(_e, _ptr, _entry) \
__bkey_for_each_ptr_decode((_e).k, (_e).v->start, \
extent_entry_last(_e), _ptr, _entry)
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
/* utility code common to all keys with pointers: */
void bch2_mark_io_failure(struct bch_io_failures *,
struct extent_ptr_decoded *);
......@@ -378,22 +353,12 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
struct bch_io_failures *,
struct extent_ptr_decoded *);
void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
void bch2_bkey_drop_device(struct bkey_s, unsigned);
const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c);
/* bch_btree_ptr: */
/* KEY_TYPE_btree_ptr: */
const char *bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_debugcheck(struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);
#define bch2_bkey_ops_btree_ptr (struct bkey_ops) { \
.key_invalid = bch2_btree_ptr_invalid, \
......@@ -402,12 +367,11 @@ void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);
.swab = bch2_ptr_swab, \
}
/* bch_extent: */
/* KEY_TYPE_extent: */
const char *bch2_extent_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_extent_debugcheck(struct bch_fs *, struct bkey_s_c);
void bch2_extent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
enum merge_result bch2_extent_merge(struct bch_fs *,
struct bkey_s, struct bkey_s);
......@@ -420,7 +384,7 @@ enum merge_result bch2_extent_merge(struct bch_fs *,
.key_merge = bch2_extent_merge, \
}
/* bch_reservation: */
/* KEY_TYPE_reservation: */
const char *bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
......@@ -433,13 +397,15 @@ enum merge_result bch2_reservation_merge(struct bch_fs *,
.key_merge = bch2_reservation_merge, \
}
void bch2_bkey_mark_replicas_cached(struct bch_fs *, struct bkey_s,
unsigned, unsigned);
/* Extent checksum entries: */
unsigned bch2_extent_is_compressed(struct bkey_s_c);
bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
void bch2_extent_crc_append(struct bkey_i *,
struct bch_extent_crc_unpacked);
bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
struct bch_extent_ptr, u64);
/* Generic code for keys with pointers: */
static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
......@@ -477,34 +443,57 @@ static inline bool bkey_extent_is_allocation(const struct bkey *k)
}
}
/* Extent entry iteration: */
static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
#define extent_for_each_entry_from(_e, _entry, _start) \
__bkey_extent_entry_for_each_from(_start, \
extent_entry_last(_e),_entry)
bkey_for_each_ptr(p, ptr)
ret.devs[ret.nr++] = ptr->dev;
#define extent_for_each_entry(_e, _entry) \
extent_for_each_entry_from(_e, _entry, (_e).v->start)
return ret;
}
#define extent_ptr_next(_e, _ptr) \
__bkey_ptr_next(_ptr, extent_entry_last(_e))
static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
#define extent_for_each_ptr(_e, _ptr) \
__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)
bkey_for_each_ptr(p, ptr)
if (!ptr->cached)
ret.devs[ret.nr++] = ptr->dev;
#define extent_for_each_ptr_decode(_e, _ptr, _entry) \
__bkey_for_each_ptr_decode((_e).k, (_e).v->start, \
extent_entry_last(_e), _ptr, _entry)
return ret;
}
void bch2_extent_crc_append(struct bkey_i *,
struct bch_extent_crc_unpacked);
void bch2_extent_ptr_decoded_append(struct bkey_i *,
struct extent_ptr_decoded *);
static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
struct bch_devs_list ret = (struct bch_devs_list) { 0 };
struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
bkey_for_each_ptr(p, ptr)
if (ptr->cached)
ret.devs[ret.nr++] = ptr->dev;
return ret;
}
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
bool bch2_check_range_allocated(struct bch_fs *, struct bpos, u64, unsigned);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
void bch2_bkey_mark_replicas_cached(struct bch_fs *, struct bkey_s,
unsigned, unsigned);
void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
void bch2_extent_ptr_decoded_append(struct bkey_i *,
struct extent_ptr_decoded *);
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
struct bch_extent_ptr *);
......@@ -525,6 +514,22 @@ do { \
} \
} while (0)
void bch2_bkey_drop_device(struct bkey_s, unsigned);
const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
struct bch_extent_ptr, u64);
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);
/* Generic extent code: */
int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);
......@@ -568,7 +573,4 @@ static inline void extent_save(struct btree *b, struct bkey_packed *dst,
BUG_ON(!bch2_bkey_pack_key(dst, src, f));
}
bool bch2_check_range_allocated(struct bch_fs *, struct bpos, u64, unsigned);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
#endif /* _BCACHEFS_EXTENTS_H */
......@@ -675,7 +675,7 @@ static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
struct bvec_iter iter;
struct bio_vec bv;
unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
? 0 : bch2_bkey_nr_ptrs_allocated(k);
? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = k.k->type == KEY_TYPE_reservation
? SECTOR_RESERVED
: SECTOR_ALLOCATED;
......@@ -2543,7 +2543,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
} else {
/* We might end up splitting compressed extents: */
unsigned nr_ptrs =
bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(copy.k));
bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
ret = bch2_disk_reservation_get(c, &disk_res,
copy.k->k.size, nr_ptrs,
......@@ -2669,7 +2669,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_cut_back(end_pos, &reservation.k_i);
sectors = reservation.k.size;
reservation.v.nr_replicas = bch2_bkey_nr_dirty_ptrs(k);
reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
if (!bkey_extent_is_allocation(k.k)) {
ret = bch2_quota_reservation_add(c, inode,
......@@ -2680,7 +2680,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
}
if (reservation.v.nr_replicas < replicas ||
bch2_extent_is_compressed(k)) {
bch2_bkey_sectors_compressed(k)) {
ret = bch2_disk_reservation_get(c, &disk_res, sectors,
replicas, 0);
if (unlikely(ret))
......
......@@ -202,8 +202,8 @@ static int sum_sector_overwrites(struct btree_trans *trans,
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
if (!may_allocate &&
bch2_bkey_nr_ptrs_allocated(old) <
bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new))) {
bch2_bkey_nr_ptrs_fully_allocated(old) <
bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new))) {
ret = -ENOSPC;
break;
}
......
......@@ -134,11 +134,11 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
* If we're not fully overwriting @k, and it's compressed, we
* need a reservation for all the pointers in @insert
*/
nr = bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(insert)) -
nr = bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(insert)) -
m->nr_ptrs_reserved;
if (insert->k.size < k.k->size &&
bch2_extent_is_compressed(k) &&
bch2_bkey_sectors_compressed(k) &&
nr > 0) {
ret = bch2_disk_reservation_add(c, &op->res,
keylist_sectors(keys) * nr, 0);
......@@ -250,7 +250,7 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
*/
#if 0
int nr = (int) io_opts.data_replicas -
bch2_bkey_nr_dirty_ptrs(k);
bch2_bkey_nr_ptrs_allocated(k);
#endif
int nr = (int) io_opts.data_replicas;
......@@ -599,7 +599,7 @@ static int __bch2_move_data(struct bch_fs *c,
if (rate)
bch2_ratelimit_increment(rate, k.k->size);
next:
atomic64_add(k.k->size * bch2_bkey_nr_dirty_ptrs(k),
atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
&stats->sectors_seen);
next_nondata:
bch2_btree_iter_next(iter);
......
......@@ -254,7 +254,7 @@ static int bch2_extent_replay_key(struct bch_fs *c, enum btree_id btree_id,
* Some extents aren't equivalent - w.r.t. what the triggers do
* - if they're split:
*/
bool remark_if_split = bch2_extent_is_compressed(bkey_i_to_s_c(k)) ||
bool remark_if_split = bch2_bkey_sectors_compressed(bkey_i_to_s_c(k)) ||
k->k.type == KEY_TYPE_reflink_p;
bool remark = false;
int ret;
......@@ -289,7 +289,7 @@ static int bch2_extent_replay_key(struct bch_fs *c, enum btree_id btree_id,
bkey_cmp(atomic_end, k->k.p) < 0) {
ret = bch2_disk_reservation_add(c, &disk_res,
k->k.size *
bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(k)),
bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(k)),
BCH_DISK_RESERVATION_NOFAIL);
BUG_ON(ret);
......