Commit 1742237b authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: extent_for_each_ptr_decode()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 642d66d1
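
This commit replaces the extent_for_each_ptr_crc() iterator, which handed callers a bare bch_extent_ptr plus a separately unpacked bch_extent_crc_unpacked, with extent_for_each_ptr_decode(), which yields a single struct extent_ptr_decoded combining both (p.ptr and p.crc) while a union bch_extent_entry pointer tracks the position in the extent's entry list. A minimal before/after sketch of a caller, assuming the in-tree bcachefs headers; 'e' is a struct bkey_s_c_extent and use_ptr() is an illustrative helper, not part of the patch:

	const struct bch_extent_ptr *ptr;
	struct bch_extent_crc_unpacked crc;

	/* old: pointer and unpacked crc travel as two separate variables */
	extent_for_each_ptr_crc(e, ptr, crc)
		use_ptr(ptr->dev, crc.compressed_size);

	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	/* new: one decoded value; 'entry' is the raw iterator position */
	extent_for_each_ptr_decode(e, p, entry)
		use_ptr(p.ptr.dev, p.crc.compressed_size);
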
@@ -546,20 +546,19 @@ static int __disk_sectors(struct bch_extent_crc_unpacked crc, unsigned sectors)
*/
static void bch2_mark_pointer(struct bch_fs *c,
struct bkey_s_c_extent e,
const struct bch_extent_ptr *ptr,
struct bch_extent_crc_unpacked crc,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type,
unsigned replicas,
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
struct bucket_mark old, new;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_BUCKET(ca, ptr);
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_BUCKET(ca, &p.ptr);
s64 uncompressed_sectors = sectors;
u64 v;
if (crc.compression_type) {
if (p.crc.compression_type) {
unsigned old_sectors, new_sectors;
if (sectors > 0) {
@@ -570,8 +569,8 @@ static void bch2_mark_pointer(struct bch_fs *c,
new_sectors = e.k->size + sectors;
}
sectors = -__disk_sectors(crc, old_sectors)
+__disk_sectors(crc, new_sectors);
sectors = -__disk_sectors(p.crc, old_sectors)
+__disk_sectors(p.crc, new_sectors);
}
/*
@@ -584,8 +583,8 @@ static void bch2_mark_pointer(struct bch_fs *c,
* caller's responsibility to not apply @fs_usage if gc is in progress.
*/
fs_usage->replicas
[!ptr->cached && replicas ? replicas - 1 : 0].data
[!ptr->cached ? data_type : BCH_DATA_CACHED] +=
[!p.ptr.cached && replicas ? replicas - 1 : 0].data
[!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
uncompressed_sectors;
if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
@@ -607,14 +606,14 @@ static void bch2_mark_pointer(struct bch_fs *c,
* the allocator invalidating a bucket after we've already
* checked the gen
*/
if (gen_after(new.gen, ptr->gen)) {
if (gen_after(new.gen, p.ptr.gen)) {
BUG_ON(!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags));
EBUG_ON(!ptr->cached &&
EBUG_ON(!p.ptr.cached &&
test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
return;
}
if (!ptr->cached)
if (!p.ptr.cached)
checked_add(new.dirty_sectors, sectors);
else
checked_add(new.cached_sectors, sectors);
@@ -695,13 +694,13 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
case BCH_EXTENT:
case BCH_EXTENT_CACHED: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
BUG_ON(!sectors);
extent_for_each_ptr_crc(e, ptr, crc)
bch2_mark_pointer(c, e, ptr, crc, sectors, data_type,
extent_for_each_ptr_decode(e, p, entry)
bch2_mark_pointer(c, e, p, sectors, data_type,
replicas, stats, journal_seq, flags);
break;
}
......
@@ -231,21 +231,21 @@ unsigned bch2_extent_durability(struct bch_fs *c, struct bkey_s_c_extent e)
unsigned bch2_extent_is_compressed(struct bkey_s_c k)
{
struct bkey_s_c_extent e;
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
unsigned ret = 0;
switch (k.k->type) {
case BCH_EXTENT:
case BCH_EXTENT_CACHED:
e = bkey_s_c_to_extent(k);
case BCH_EXTENT_CACHED: {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
extent_for_each_ptr_crc(e, ptr, crc)
if (!ptr->cached &&
crc.compression_type != BCH_COMPRESSION_NONE &&
crc.compressed_size < crc.live_size)
ret = max_t(unsigned, ret, crc.compressed_size);
extent_for_each_ptr_decode(e, p, entry)
if (!p.ptr.cached &&
p.crc.compression_type != BCH_COMPRESSION_NONE &&
p.crc.compressed_size < p.crc.live_size)
ret = max_t(unsigned, ret, p.crc.compressed_size);
}
}
return ret;
@@ -254,17 +254,17 @@ unsigned bch2_extent_is_compressed(struct bkey_s_c k)
bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
struct bch_extent_ptr m, u64 offset)
{
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
extent_for_each_ptr_crc(e, ptr, crc)
if (ptr->dev == m.dev &&
ptr->gen == m.gen &&
(s64) ptr->offset + crc.offset - bkey_start_offset(e.k) ==
extent_for_each_ptr_decode(e, p, entry)
if (p.ptr.dev == m.dev &&
p.ptr.gen == m.gen &&
(s64) p.ptr.offset + p.crc.offset - bkey_start_offset(e.k) ==
(s64) m.offset - offset)
return ptr;
return true;
return NULL;
return false;
}
/* Doesn't cleanup redundant crcs */
@@ -323,7 +323,7 @@ bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
struct bch_extent_crc_unpacked n)
{
struct bch_extent_crc_unpacked u;
struct bch_extent_ptr *ptr;
struct extent_ptr_decoded p;
union bch_extent_entry *i;
/* Find a checksum entry that covers only live data: */
@@ -345,11 +345,11 @@ bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
bch2_extent_crc_append(e, n);
restart_narrow_pointers:
extent_for_each_ptr_crc(extent_i_to_s(e), ptr, u)
if (can_narrow_crc(u, n)) {
ptr->offset += u.offset;
extent_ptr_append(e, *ptr);
__bch2_extent_drop_ptr(extent_i_to_s(e), ptr);
extent_for_each_ptr_decode(extent_i_to_s(e), p, i)
if (can_narrow_crc(p.crc, n)) {
i->ptr.offset += p.crc.offset;
extent_ptr_append(e, i->ptr);
__bch2_extent_drop_ptr(extent_i_to_s(e), &i->ptr);
goto restart_narrow_pointers;
}
@@ -475,6 +475,8 @@ void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
entry = extent_entry_next(entry)) {
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
break;
case BCH_EXTENT_ENTRY_crc32:
entry->crc32.csum = swab32(entry->crc32.csum);
break;
@@ -488,8 +490,6 @@ void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
entry->crc128.csum.lo = (__force __le64)
swab64((__force u64) entry->crc128.csum.lo);
break;
case BCH_EXTENT_ENTRY_ptr:
break;
}
}
break;
@@ -605,28 +605,28 @@ static int extent_pick_read_device(struct bch_fs *c,
struct bch_devs_mask *avoid,
struct extent_ptr_decoded *pick)
{
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
struct bch_dev *ca;
int ret = 0;
extent_for_each_ptr_crc(e, ptr, crc) {
ca = bch_dev_bkey_exists(c, ptr->dev);
extent_for_each_ptr_decode(e, p, entry) {
ca = bch_dev_bkey_exists(c, p.ptr.dev);
if (ptr->cached && ptr_stale(ca, ptr))
if (p.ptr.cached && ptr_stale(ca, &p.ptr))
continue;
if (avoid && test_bit(ptr->dev, avoid->d))
continue;
/*
* XXX: need to make avoid work correctly for stripe ptrs
*/
if (ret && !dev_latency_better(c, ptr, &pick->ptr))
if (avoid && test_bit(p.ptr.dev, avoid->d))
continue;
*pick = (struct extent_ptr_decoded) {
.ptr = *ptr,
.crc = crc,
};
if (ret && !dev_latency_better(c, &p.ptr, &pick->ptr))
continue;
*pick = p;
ret = 1;
}
......
@@ -182,12 +182,24 @@ static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
switch (extent_entry_type(e)) {
case BCH_EXTENT_ENTRY_ptr:
return true;
default:
return false;
}
}
static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
return !extent_entry_is_ptr(e);
switch (extent_entry_type(e)) {
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
return true;
default:
return false;
}
}
union bch_extent_crc {
@@ -310,23 +322,25 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
#define extent_for_each_entry(_e, _entry) \
extent_for_each_entry_from(_e, _entry, (_e).v->start)
/* Iterate over crcs only: */
/* Iterate over pointers only: */
#define __extent_crc_next(_e, _p) \
#define extent_ptr_next(_e, _ptr) \
({ \
typeof(&(_e).v->start[0]) _entry = _p; \
typeof(&(_e).v->start[0]) _entry; \
\
while ((_entry) < extent_entry_last(_e) && \
!extent_entry_is_crc(_entry)) \
(_entry) = extent_entry_next(_entry); \
extent_for_each_entry_from(_e, _entry, to_entry(_ptr)) \
if (extent_entry_is_ptr(_entry)) \
break; \
\
entry_to_crc(_entry < extent_entry_last(_e) ? _entry : NULL); \
_entry < extent_entry_last(_e) ? entry_to_ptr(_entry) : NULL; \
})
#define __extent_for_each_crc(_e, _crc) \
for ((_crc) = __extent_crc_next(_e, (_e).v->start); \
(_crc); \
(_crc) = __extent_crc_next(_e, extent_entry_next(to_entry(_crc))))
#define extent_for_each_ptr(_e, _ptr) \
for ((_ptr) = &(_e).v->start->ptr; \
((_ptr) = extent_ptr_next(_e, _ptr)); \
(_ptr)++)
/* Iterate over crcs only: */
#define extent_crc_next(_e, _crc, _iter) \
({ \
@@ -347,43 +361,44 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
/* Iterate over pointers, with crcs: */
#define extent_ptr_crc_next(_e, _ptr, _crc) \
static inline struct extent_ptr_decoded
__extent_ptr_decoded_init(const struct bkey *k)
{
return (struct extent_ptr_decoded) {
.crc = bch2_extent_crc_unpack(k, NULL),
};
}
#define EXTENT_ITERATE_EC (1 << 0)
#define __extent_ptr_next_decode(_e, _ptr, _entry) \
({ \
__label__ out; \
typeof(&(_e).v->start[0]) _entry; \
\
extent_for_each_entry_from(_e, _entry, to_entry(_ptr)) \
if (extent_entry_is_crc(_entry)) { \
(_crc) = bch2_extent_crc_unpack((_e).k, entry_to_crc(_entry));\
} else { \
_ptr = entry_to_ptr(_entry); \
extent_for_each_entry_from(_e, _entry, _entry) \
switch (extent_entry_type(_entry)) { \
case BCH_EXTENT_ENTRY_ptr: \
(_ptr).ptr = _entry->ptr; \
goto out; \
case BCH_EXTENT_ENTRY_crc32: \
case BCH_EXTENT_ENTRY_crc64: \
case BCH_EXTENT_ENTRY_crc128: \
(_ptr).crc = bch2_extent_crc_unpack((_e).k, \
entry_to_crc(_entry)); \
break; \
} \
\
_ptr = NULL; \
out: \
_ptr; \
_entry < extent_entry_last(_e); \
})
#define extent_for_each_ptr_crc(_e, _ptr, _crc) \
for ((_crc) = bch2_extent_crc_unpack((_e).k, NULL), \
(_ptr) = &(_e).v->start->ptr; \
((_ptr) = extent_ptr_crc_next(_e, _ptr, _crc)); \
(_ptr)++)
/* Iterate over pointers only, and from a given position: */
#define extent_ptr_next(_e, _ptr) \
({ \
struct bch_extent_crc_unpacked _crc; \
\
extent_ptr_crc_next(_e, _ptr, _crc); \
})
#define extent_for_each_ptr_decode(_e, _ptr, _entry) \
for ((_ptr) = __extent_ptr_decoded_init((_e).k), \
(_entry) = (_e).v->start; \
__extent_ptr_next_decode(_e, _ptr, _entry); \
(_entry) = extent_entry_next(_entry))
#define extent_for_each_ptr(_e, _ptr) \
for ((_ptr) = &(_e).v->start->ptr; \
((_ptr) = extent_ptr_next(_e, _ptr)); \
(_ptr)++)
/* Iterate over pointers backwards: */
#define extent_ptr_prev(_e, _ptr) \
({ \
......
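
As a reading aid, extent_for_each_ptr_decode() as defined above behaves like the following open-coded loop (illustrative sketch only; the real macro is the statement expression shown in the hunk above, and use_ptr() again stands in for the loop body):

	/* 'e' is a struct bkey_s_c_extent */
	struct extent_ptr_decoded p = {
		.crc = bch2_extent_crc_unpack(e.k, NULL),	/* no crc entry seen yet */
	};
	const union bch_extent_entry *entry;

	for (entry = e.v->start;
	     entry < extent_entry_last(e);
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			/* remember the most recently seen checksum/compression info */
			p.crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
			break;
		case BCH_EXTENT_ENTRY_ptr:
			/* each pointer is decoded together with the crc that precedes it */
			p.ptr = entry->ptr;
			use_ptr(&p);
			break;
		}
	}
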
@@ -920,12 +920,12 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
if (bkey_extent_is_data(k.k)) {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *i;
struct extent_ptr_decoded p;
extent_for_each_crc(e, crc, i)
want_full_extent |= ((crc.csum_type != 0) |
(crc.compression_type != 0));
extent_for_each_ptr_decode(e, p, i)
want_full_extent |= ((p.crc.csum_type != 0) |
(p.crc.compression_type != 0));
}
readpage_bio_extend(readpages_iter,
......
@@ -1053,27 +1053,27 @@ static int bch2_fill_extent(struct fiemap_extent_info *info,
{
if (bkey_extent_is_data(&k->k)) {
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
int ret;
extent_for_each_ptr_crc(e, ptr, crc) {
extent_for_each_ptr_decode(e, p, entry) {
int flags2 = 0;
u64 offset = ptr->offset;
u64 offset = p.ptr.offset;
if (crc.compression_type)
if (p.crc.compression_type)
flags2 |= FIEMAP_EXTENT_ENCODED;
else
offset += crc.offset;
offset += p.crc.offset;
if ((offset & (PAGE_SECTORS - 1)) ||
(e.k->size & (PAGE_SECTORS - 1)))
flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;
ret = fiemap_fill_next_extent(info,
bkey_start_offset(e.k) << 9,
offset << 9,
e.k->size << 9, flags|flags2);
bkey_start_offset(e.k) << 9,
offset << 9,
e.k->size << 9, flags|flags2);
if (ret)
return ret;
}
......
@@ -67,8 +67,8 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
struct bkey_i_extent *insert, *new =
bkey_i_to_extent(bch2_keylist_front(keys));
BKEY_PADDED(k) _new, _insert;
struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bool did_work = false;
int nr;
@@ -99,14 +99,15 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
bch2_cut_back(insert->k.p, &new->k);
if (m->data_cmd == DATA_REWRITE) {
ptr = (struct bch_extent_ptr *)
struct bch_extent_ptr *ptr = (void *)
bch2_extent_has_device(extent_i_to_s_c(insert),
m->data_opts.rewrite_dev);
BUG_ON(!ptr);
bch2_extent_drop_ptr(extent_i_to_s(insert), ptr);
}
extent_for_each_ptr_crc(extent_i_to_s(new), ptr, crc) {
if (bch2_extent_has_device(extent_i_to_s_c(insert), ptr->dev)) {
extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
if (bch2_extent_has_device(extent_i_to_s_c(insert), p.ptr.dev)) {
/*
* raced with another move op? extent already
* has a pointer to the device we just wrote
@@ -115,8 +116,8 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
continue;
}
bch2_extent_crc_append(insert, crc);
extent_ptr_append(insert, *ptr);
bch2_extent_crc_append(insert, p.crc);
extent_ptr_append(insert, p.ptr);
did_work = true;
}
@@ -379,8 +380,8 @@ static int bch2_move_extent(struct bch_fs *c,
struct data_opts data_opts)
{
struct moving_io *io;
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
unsigned sectors = e.k->size, pages;
int ret = -ENOMEM;
@@ -393,8 +394,8 @@ static int bch2_move_extent(struct bch_fs *c,
SECTORS_IN_FLIGHT_PER_DEVICE);
/* write path might have to decompress data: */
extent_for_each_ptr_crc(e, ptr, crc)
sectors = max_t(unsigned, sectors, crc.uncompressed_size);
extent_for_each_ptr_decode(e, p, entry)
sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
io = kzalloc(sizeof(struct moving_io) +
......
@@ -18,17 +18,16 @@
#include <linux/sched/cputime.h>
static inline bool rebalance_ptr_pred(struct bch_fs *c,
const struct bch_extent_ptr *ptr,
struct bch_extent_crc_unpacked crc,
struct extent_ptr_decoded p,
struct bch_io_opts *io_opts)
{
if (io_opts->background_target &&
!bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
!ptr->cached)
!bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target) &&
!p.ptr.cached)
return true;
if (io_opts->background_compression &&
crc.compression_type !=
p.crc.compression_type !=
bch2_compression_opt_to_type[io_opts->background_compression])
return true;
@@ -39,8 +38,8 @@ void bch2_rebalance_add_key(struct bch_fs *c,
struct bkey_s_c k,
struct bch_io_opts *io_opts)
{
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
struct bkey_s_c_extent e;
if (!bkey_extent_is_data(k.k))
@@ -52,13 +51,13 @@ void bch2_rebalance_add_key(struct bch_fs *c,
e = bkey_s_c_to_extent(k);
extent_for_each_ptr_crc(e, ptr, crc)
if (rebalance_ptr_pred(c, ptr, crc, io_opts)) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
extent_for_each_ptr_decode(e, p, entry)
if (rebalance_ptr_pred(c, p, io_opts)) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
if (atomic64_add_return(crc.compressed_size,
if (atomic64_add_return(p.crc.compressed_size,
&ca->rebalance_work) ==
crc.compressed_size)
p.crc.compressed_size)
rebalance_wakeup(c);
}
}
@@ -76,16 +75,16 @@ static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
struct bch_io_opts *io_opts,
struct data_opts *data_opts)
{
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
/* Make sure we have room to add a new pointer: */
if (bkey_val_u64s(e.k) + BKEY_EXTENT_PTR_U64s_MAX >
BKEY_EXTENT_VAL_U64s_MAX)
return DATA_SKIP;
extent_for_each_ptr_crc(e, ptr, crc)
if (rebalance_ptr_pred(c, ptr, crc, io_opts))
extent_for_each_ptr_decode(e, p, entry)
if (rebalance_ptr_pred(c, p, io_opts))
goto found;
return DATA_SKIP;
......
@@ -283,19 +283,19 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
if (k.k->type == BCH_EXTENT) {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const struct bch_extent_ptr *ptr;
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
extent_for_each_ptr_crc(e, ptr, crc) {
if (crc.compression_type == BCH_COMPRESSION_NONE) {
extent_for_each_ptr_decode(e, p, entry) {
if (p.crc.compression_type == BCH_COMPRESSION_NONE) {
nr_uncompressed_extents++;
uncompressed_sectors += e.k->size;
} else {
nr_compressed_extents++;
compressed_sectors_compressed +=
crc.compressed_size;
p.crc.compressed_size;
compressed_sectors_uncompressed +=
crc.uncompressed_size;
p.crc.uncompressed_size;
}
/* only looking at the first ptr */
......