Commit 1742237b authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: extent_for_each_ptr_decode()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 642d66d1
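
Editorial context (not part of the commit message): this commit replaces the two-variable iterator extent_for_each_ptr_crc(e, ptr, crc) with extent_for_each_ptr_decode(e, p, entry), which hands callers a single struct extent_ptr_decoded bundling each pointer with its unpacked checksum/compression state. From the assignments visible in this diff (the old `.ptr = *ptr, .crc = crc` compound literal and the new `p.ptr`/`p.crc` accesses), the decoded struct presumably looks like the sketch below; the hypothetical helper after it shows the new calling convention, mirroring bch2_extent_is_compressed() from this diff.

/* Presumed layout, inferred from the usage in this diff: */
struct extent_ptr_decoded {
	struct bch_extent_crc_unpacked	crc;	/* unpacked csum/compression info */
	struct bch_extent_ptr		ptr;	/* the raw extent pointer */
};

/*
 * Hypothetical helper (illustration only, not part of the commit):
 * count the dirty, compressed pointers in an extent with the new
 * iterator -- each iteration sees a pointer and its crc together.
 */
static unsigned count_compressed_ptrs(struct bkey_s_c_extent e)
{
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned nr = 0;

	extent_for_each_ptr_decode(e, p, entry)
		if (!p.ptr.cached &&
		    p.crc.compression_type != BCH_COMPRESSION_NONE)
			nr++;

	return nr;
}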
@@ -546,20 +546,19 @@ static int __disk_sectors(struct bch_extent_crc_unpacked crc, unsigned sectors)
  */
 static void bch2_mark_pointer(struct bch_fs *c,
 			      struct bkey_s_c_extent e,
-			      const struct bch_extent_ptr *ptr,
-			      struct bch_extent_crc_unpacked crc,
+			      struct extent_ptr_decoded p,
 			      s64 sectors, enum bch_data_type data_type,
 			      unsigned replicas,
 			      struct bch_fs_usage *fs_usage,
 			      u64 journal_seq, unsigned flags)
 {
 	struct bucket_mark old, new;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-	struct bucket *g = PTR_BUCKET(ca, ptr);
+	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+	struct bucket *g = PTR_BUCKET(ca, &p.ptr);
 	s64 uncompressed_sectors = sectors;
 	u64 v;
 
-	if (crc.compression_type) {
+	if (p.crc.compression_type) {
 		unsigned old_sectors, new_sectors;
 
 		if (sectors > 0) {
@@ -570,8 +569,8 @@ static void bch2_mark_pointer(struct bch_fs *c,
 			new_sectors = e.k->size + sectors;
 		}
 
-		sectors = -__disk_sectors(crc, old_sectors)
-			  +__disk_sectors(crc, new_sectors);
+		sectors = -__disk_sectors(p.crc, old_sectors)
+			  +__disk_sectors(p.crc, new_sectors);
 	}
 
 	/*
@@ -584,8 +583,8 @@ static void bch2_mark_pointer(struct bch_fs *c,
 	 * caller's responsibility to not apply @fs_usage if gc is in progress.
 	 */
 	fs_usage->replicas
-		[!ptr->cached && replicas ? replicas - 1 : 0].data
-		[!ptr->cached ? data_type : BCH_DATA_CACHED] +=
+		[!p.ptr.cached && replicas ? replicas - 1 : 0].data
+		[!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
 			uncompressed_sectors;
 
 	if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
@@ -607,14 +606,14 @@ static void bch2_mark_pointer(struct bch_fs *c,
 		 * the allocator invalidating a bucket after we've already
 		 * checked the gen
 		 */
-		if (gen_after(new.gen, ptr->gen)) {
+		if (gen_after(new.gen, p.ptr.gen)) {
 			BUG_ON(!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags));
-			EBUG_ON(!ptr->cached &&
+			EBUG_ON(!p.ptr.cached &&
 				test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
 			return;
 		}
 
-		if (!ptr->cached)
+		if (!p.ptr.cached)
 			checked_add(new.dirty_sectors, sectors);
 		else
 			checked_add(new.cached_sectors, sectors);
@@ -695,13 +694,13 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 	case BCH_EXTENT:
 	case BCH_EXTENT_CACHED: {
 		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
-		const struct bch_extent_ptr *ptr;
-		struct bch_extent_crc_unpacked crc;
+		const union bch_extent_entry *entry;
+		struct extent_ptr_decoded p;
 
 		BUG_ON(!sectors);
 
-		extent_for_each_ptr_crc(e, ptr, crc)
-			bch2_mark_pointer(c, e, ptr, crc, sectors, data_type,
+		extent_for_each_ptr_decode(e, p, entry)
+			bch2_mark_pointer(c, e, p, sectors, data_type,
 					  replicas, stats, journal_seq, flags);
 		break;
 	}
...
@@ -231,21 +231,21 @@ unsigned bch2_extent_durability(struct bch_fs *c, struct bkey_s_c_extent e)
 unsigned bch2_extent_is_compressed(struct bkey_s_c k)
 {
-	struct bkey_s_c_extent e;
-	const struct bch_extent_ptr *ptr;
-	struct bch_extent_crc_unpacked crc;
 	unsigned ret = 0;
 
 	switch (k.k->type) {
 	case BCH_EXTENT:
-	case BCH_EXTENT_CACHED:
-		e = bkey_s_c_to_extent(k);
+	case BCH_EXTENT_CACHED: {
+		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+		const union bch_extent_entry *entry;
+		struct extent_ptr_decoded p;
 
-		extent_for_each_ptr_crc(e, ptr, crc)
-			if (!ptr->cached &&
-			    crc.compression_type != BCH_COMPRESSION_NONE &&
-			    crc.compressed_size < crc.live_size)
-				ret = max_t(unsigned, ret, crc.compressed_size);
+		extent_for_each_ptr_decode(e, p, entry)
+			if (!p.ptr.cached &&
+			    p.crc.compression_type != BCH_COMPRESSION_NONE &&
+			    p.crc.compressed_size < p.crc.live_size)
+				ret = max_t(unsigned, ret, p.crc.compressed_size);
+	}
 	}
 
 	return ret;
@@ -254,17 +254,17 @@ unsigned bch2_extent_is_compressed(struct bkey_s_c k)
 bool bch2_extent_matches_ptr(struct bch_fs *c, struct bkey_s_c_extent e,
 			     struct bch_extent_ptr m, u64 offset)
 {
-	const struct bch_extent_ptr *ptr;
-	struct bch_extent_crc_unpacked crc;
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
 
-	extent_for_each_ptr_crc(e, ptr, crc)
-		if (ptr->dev == m.dev &&
-		    ptr->gen == m.gen &&
-		    (s64) ptr->offset + crc.offset - bkey_start_offset(e.k) ==
+	extent_for_each_ptr_decode(e, p, entry)
+		if (p.ptr.dev == m.dev &&
+		    p.ptr.gen == m.gen &&
+		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(e.k) ==
 		    (s64) m.offset - offset)
-			return ptr;
+			return true;
 
-	return NULL;
+	return false;
 }
 
 /* Doesn't cleanup redundant crcs */
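
A side note on the bch2_extent_matches_ptr() hunk above: the function has always been declared bool, but the old body returned `ptr` or `NULL` and leaned on C's implicit pointer-to-_Bool conversion; the rewrite returns true/false explicitly. A minimal standalone illustration of the difference (hypothetical code, not from bcachefs):

#include <stdbool.h>
#include <stddef.h>

/* Old style: declared bool, but written as if it returned a pointer. */
static bool contains_old(const int *arr, size_t n, int key)
{
	for (size_t i = 0; i < n; i++)
		if (arr[i] == key)
			return &arr[i];	/* pointer silently collapses to true */
	return NULL;			/* NULL silently collapses to false */
}

/* New style: the return value says what it means. */
static bool contains_new(const int *arr, size_t n, int key)
{
	for (size_t i = 0; i < n; i++)
		if (arr[i] == key)
			return true;
	return false;
}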
@@ -323,7 +323,7 @@ bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
 			     struct bch_extent_crc_unpacked n)
 {
 	struct bch_extent_crc_unpacked u;
-	struct bch_extent_ptr *ptr;
+	struct extent_ptr_decoded p;
 	union bch_extent_entry *i;
 
 	/* Find a checksum entry that covers only live data: */
@@ -345,11 +345,11 @@ bool bch2_extent_narrow_crcs(struct bkey_i_extent *e,
 	bch2_extent_crc_append(e, n);
 restart_narrow_pointers:
-	extent_for_each_ptr_crc(extent_i_to_s(e), ptr, u)
-		if (can_narrow_crc(u, n)) {
-			ptr->offset += u.offset;
-			extent_ptr_append(e, *ptr);
-			__bch2_extent_drop_ptr(extent_i_to_s(e), ptr);
+	extent_for_each_ptr_decode(extent_i_to_s(e), p, i)
+		if (can_narrow_crc(p.crc, n)) {
+			i->ptr.offset += p.crc.offset;
+			extent_ptr_append(e, i->ptr);
+			__bch2_extent_drop_ptr(extent_i_to_s(e), &i->ptr);
 			goto restart_narrow_pointers;
 		}
@@ -475,6 +475,8 @@ void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
 	     entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
 	     entry = extent_entry_next(entry)) {
 		switch (extent_entry_type(entry)) {
+		case BCH_EXTENT_ENTRY_ptr:
+			break;
 		case BCH_EXTENT_ENTRY_crc32:
 			entry->crc32.csum = swab32(entry->crc32.csum);
 			break;
@@ -488,8 +490,6 @@ void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
 			entry->crc128.csum.lo = (__force __le64)
 				swab64((__force u64) entry->crc128.csum.lo);
 			break;
-		case BCH_EXTENT_ENTRY_ptr:
-			break;
 		}
 	}
 	break;
@@ -605,28 +605,28 @@ static int extent_pick_read_device(struct bch_fs *c,
 				   struct bch_devs_mask *avoid,
 				   struct extent_ptr_decoded *pick)
 {
-	const struct bch_extent_ptr *ptr;
-	struct bch_extent_crc_unpacked crc;
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
 	struct bch_dev *ca;
 	int ret = 0;
 
-	extent_for_each_ptr_crc(e, ptr, crc) {
-		ca = bch_dev_bkey_exists(c, ptr->dev);
+	extent_for_each_ptr_decode(e, p, entry) {
+		ca = bch_dev_bkey_exists(c, p.ptr.dev);
 
-		if (ptr->cached && ptr_stale(ca, ptr))
+		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
 			continue;
 
-		if (avoid && test_bit(ptr->dev, avoid->d))
+		/*
+		 * XXX: need to make avoid work correctly for stripe ptrs
+		 */
+		if (avoid && test_bit(p.ptr.dev, avoid->d))
 			continue;
 
-		if (ret && !dev_latency_better(c, ptr, &pick->ptr))
+		if (ret && !dev_latency_better(c, &p.ptr, &pick->ptr))
 			continue;
 
-		*pick = (struct extent_ptr_decoded) {
-			.ptr	= *ptr,
-			.crc	= crc,
-		};
-
+		*pick = p;
 		ret = 1;
 	}
...
@@ -182,12 +182,24 @@ static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
 static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
 {
-	return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
+	switch (extent_entry_type(e)) {
+	case BCH_EXTENT_ENTRY_ptr:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
 {
-	return !extent_entry_is_ptr(e);
+	switch (extent_entry_type(e)) {
+	case BCH_EXTENT_ENTRY_crc32:
+	case BCH_EXTENT_ENTRY_crc64:
+	case BCH_EXTENT_ENTRY_crc128:
+		return true;
+	default:
+		return false;
+	}
 }
 
 union bch_extent_crc {
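
Rewriting extent_entry_is_ptr() and extent_entry_is_crc() as switches over the entry type matters because the old definition, is_crc = !is_ptr, classifies anything that isn't a pointer as a crc. The "XXX ... stripe ptrs" comment added in extent_pick_read_device() suggests new entry types are coming; with explicit case lists, an unknown type falls through to `return false` in both predicates instead of being silently treated as a crc. A sketch of the failure mode (the stripe_ptr type is hypothetical here, not part of this commit):

/*
 * Suppose a new entry type is added later (hypothetical):
 *
 *	BCH_EXTENT_ENTRY_stripe_ptr
 *
 * Old predicates:
 *	extent_entry_is_ptr(e)  -> false
 *	extent_entry_is_crc(e)  -> !is_ptr == true	(wrong: not a crc!)
 *	  ...and the entry would be fed to entry_to_crc()/crc unpacking.
 *
 * New predicates:
 *	extent_entry_is_ptr(e)  -> false (default case)
 *	extent_entry_is_crc(e)  -> false (default case)
 */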
@@ -310,23 +322,25 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
 #define extent_for_each_entry(_e, _entry)				\
 	extent_for_each_entry_from(_e, _entry, (_e).v->start)
 
-/* Iterate over crcs only: */
+/* Iterate over pointers only: */
 
-#define __extent_crc_next(_e, _p)					\
+#define extent_ptr_next(_e, _ptr)					\
 ({									\
-	typeof(&(_e).v->start[0]) _entry = _p;				\
+	typeof(&(_e).v->start[0]) _entry;				\
 									\
-	while ((_entry) < extent_entry_last(_e) &&			\
-	       !extent_entry_is_crc(_entry))				\
-		(_entry) = extent_entry_next(_entry);			\
+	extent_for_each_entry_from(_e, _entry, to_entry(_ptr))		\
+		if (extent_entry_is_ptr(_entry))			\
+			break;						\
 									\
-	entry_to_crc(_entry < extent_entry_last(_e) ? _entry : NULL);	\
+	_entry < extent_entry_last(_e) ? entry_to_ptr(_entry) : NULL;	\
 })
 
-#define __extent_for_each_crc(_e, _crc)					\
-	for ((_crc) = __extent_crc_next(_e, (_e).v->start);		\
-	     (_crc);							\
-	     (_crc) = __extent_crc_next(_e, extent_entry_next(to_entry(_crc))))
+#define extent_for_each_ptr(_e, _ptr)					\
+	for ((_ptr) = &(_e).v->start->ptr;				\
+	     ((_ptr) = extent_ptr_next(_e, _ptr));			\
+	     (_ptr)++)
+
+/* Iterate over crcs only: */
 
 #define extent_crc_next(_e, _crc, _iter)				\
 ({									\
@@ -347,43 +361,44 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
 /* Iterate over pointers, with crcs: */
 
-#define extent_ptr_crc_next(_e, _ptr, _crc)				\
+static inline struct extent_ptr_decoded
+__extent_ptr_decoded_init(const struct bkey *k)
+{
+	return (struct extent_ptr_decoded) {
+		.crc		= bch2_extent_crc_unpack(k, NULL),
+	};
+}
+
+#define EXTENT_ITERATE_EC		(1 << 0)
+
+#define __extent_ptr_next_decode(_e, _ptr, _entry)			\
 ({									\
 	__label__ out;							\
-	typeof(&(_e).v->start[0]) _entry;				\
 									\
-	extent_for_each_entry_from(_e, _entry, to_entry(_ptr))		\
-		if (extent_entry_is_crc(_entry)) {			\
-			(_crc) = bch2_extent_crc_unpack((_e).k, entry_to_crc(_entry));\
-		} else {						\
-			_ptr = entry_to_ptr(_entry);			\
+	extent_for_each_entry_from(_e, _entry, _entry)			\
+		switch (extent_entry_type(_entry)) {			\
+		case BCH_EXTENT_ENTRY_ptr:				\
+			(_ptr).ptr = _entry->ptr;			\
 			goto out;					\
+		case BCH_EXTENT_ENTRY_crc32:				\
+		case BCH_EXTENT_ENTRY_crc64:				\
+		case BCH_EXTENT_ENTRY_crc128:				\
+			(_ptr).crc = bch2_extent_crc_unpack((_e).k,	\
+					entry_to_crc(_entry));		\
+			break;						\
 		}							\
 									\
-	_ptr = NULL;							\
 out:									\
-	_ptr;								\
+	_entry < extent_entry_last(_e);					\
 })
 
-#define extent_for_each_ptr_crc(_e, _ptr, _crc)				\
-	for ((_crc) = bch2_extent_crc_unpack((_e).k, NULL),		\
-	     (_ptr) = &(_e).v->start->ptr;				\
-	     ((_ptr) = extent_ptr_crc_next(_e, _ptr, _crc));		\
-	     (_ptr)++)
-
-/* Iterate over pointers only, and from a given position: */
-
-#define extent_ptr_next(_e, _ptr)					\
-({									\
-	struct bch_extent_crc_unpacked _crc;				\
-									\
-	extent_ptr_crc_next(_e, _ptr, _crc);				\
-})
+#define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
+	for ((_ptr) = __extent_ptr_decoded_init((_e).k),		\
+	     (_entry) = (_e).v->start;					\
+	     __extent_ptr_next_decode(_e, _ptr, _entry);		\
+	     (_entry) = extent_entry_next(_entry))
 
-#define extent_for_each_ptr(_e, _ptr)					\
-	for ((_ptr) = &(_e).v->start->ptr;				\
-	     ((_ptr) = extent_ptr_next(_e, _ptr));			\
-	     (_ptr)++)
+/* Iterate over pointers backwards: */
 
 #define extent_ptr_prev(_e, _ptr)					\
 ({									\
...
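
The new iterator is easier to follow expanded by hand. Under the assumption that extent_entry_last(), entry_to_crc() and friends behave as elsewhere in this header, and ignoring the statement-expression plumbing, extent_for_each_ptr_decode(e, p, entry) with a body use(p) is roughly equivalent to this sketch (use() stands in for the loop body; not compilable on its own):

static void iterate_decoded(struct bkey_s_c_extent e)
{
	struct extent_ptr_decoded p = {
		.crc = bch2_extent_crc_unpack(e.k, NULL),	/* "no crc" default */
	};
	const union bch_extent_entry *entry = e.v->start;

	while (entry < extent_entry_last(e)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			p.ptr = entry->ptr;
			use(p);		/* body runs once per pointer */
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			/* a crc entry applies to the pointers that follow it */
			p.crc = bch2_extent_crc_unpack(e.k, entry_to_crc(entry));
			break;
		}
		entry = extent_entry_next(entry);
	}
}

The win over the old extent_ptr_crc_next() is that the running crc state lives inside p rather than in a second loop variable whose pairing with the pointer was implicit.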
@@ -920,12 +920,12 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
 		if (bkey_extent_is_data(k.k)) {
 			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
-			struct bch_extent_crc_unpacked crc;
 			const union bch_extent_entry *i;
+			struct extent_ptr_decoded p;
 
-			extent_for_each_crc(e, crc, i)
-				want_full_extent |= ((crc.csum_type != 0) |
-						     (crc.compression_type != 0));
+			extent_for_each_ptr_decode(e, p, i)
+				want_full_extent |= ((p.crc.csum_type != 0) |
+						     (p.crc.compression_type != 0));
 		}
 
 		readpage_bio_extend(readpages_iter,
...
@@ -1053,27 +1053,27 @@ static int bch2_fill_extent(struct fiemap_extent_info *info,
 {
 	if (bkey_extent_is_data(&k->k)) {
 		struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
-		const struct bch_extent_ptr *ptr;
-		struct bch_extent_crc_unpacked crc;
+		const union bch_extent_entry *entry;
+		struct extent_ptr_decoded p;
 		int ret;
 
-		extent_for_each_ptr_crc(e, ptr, crc) {
+		extent_for_each_ptr_decode(e, p, entry) {
 			int flags2 = 0;
-			u64 offset = ptr->offset;
+			u64 offset = p.ptr.offset;
 
-			if (crc.compression_type)
+			if (p.crc.compression_type)
 				flags2 |= FIEMAP_EXTENT_ENCODED;
 			else
-				offset += crc.offset;
+				offset += p.crc.offset;
 
 			if ((offset & (PAGE_SECTORS - 1)) ||
 			    (e.k->size & (PAGE_SECTORS - 1)))
 				flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;
 
 			ret = fiemap_fill_next_extent(info,
 						bkey_start_offset(e.k) << 9,
 						offset << 9,
 						e.k->size << 9, flags|flags2);
 			if (ret)
 				return ret;
 		}
...
@@ -67,8 +67,8 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
 		struct bkey_i_extent *insert, *new =
 			bkey_i_to_extent(bch2_keylist_front(keys));
 		BKEY_PADDED(k) _new, _insert;
-		struct bch_extent_ptr *ptr;
-		struct bch_extent_crc_unpacked crc;
+		const union bch_extent_entry *entry;
+		struct extent_ptr_decoded p;
 		bool did_work = false;
 		int nr;
@@ -99,14 +99,15 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
 		bch2_cut_back(insert->k.p, &new->k);
 
 		if (m->data_cmd == DATA_REWRITE) {
-			ptr = (struct bch_extent_ptr *)
+			struct bch_extent_ptr *ptr = (void *)
 				bch2_extent_has_device(extent_i_to_s_c(insert),
 						       m->data_opts.rewrite_dev);
+			BUG_ON(!ptr);
 			bch2_extent_drop_ptr(extent_i_to_s(insert), ptr);
 		}
 
-		extent_for_each_ptr_crc(extent_i_to_s(new), ptr, crc) {
-			if (bch2_extent_has_device(extent_i_to_s_c(insert), ptr->dev)) {
+		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
+			if (bch2_extent_has_device(extent_i_to_s_c(insert), p.ptr.dev)) {
 				/*
 				 * raced with another move op? extent already
 				 * has a pointer to the device we just wrote
@@ -115,8 +116,8 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
 				continue;
 			}
 
-			bch2_extent_crc_append(insert, crc);
-			extent_ptr_append(insert, *ptr);
+			bch2_extent_crc_append(insert, p.crc);
+			extent_ptr_append(insert, p.ptr);
 			did_work = true;
 		}
@@ -379,8 +380,8 @@ static int bch2_move_extent(struct bch_fs *c,
 			    struct data_opts data_opts)
 {
 	struct moving_io *io;
-	const struct bch_extent_ptr *ptr;
-	struct bch_extent_crc_unpacked crc;
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
 	unsigned sectors = e.k->size, pages;
 	int ret = -ENOMEM;
@@ -393,8 +394,8 @@ static int bch2_move_extent(struct bch_fs *c,
 			       SECTORS_IN_FLIGHT_PER_DEVICE);
 
 	/* write path might have to decompress data: */
-	extent_for_each_ptr_crc(e, ptr, crc)
-		sectors = max_t(unsigned, sectors, crc.uncompressed_size);
+	extent_for_each_ptr_decode(e, p, entry)
+		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
 
 	pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
 	io = kzalloc(sizeof(struct moving_io) +
...
@@ -18,17 +18,16 @@
 #include <linux/sched/cputime.h>
 
 static inline bool rebalance_ptr_pred(struct bch_fs *c,
-				      const struct bch_extent_ptr *ptr,
-				      struct bch_extent_crc_unpacked crc,
+				      struct extent_ptr_decoded p,
 				      struct bch_io_opts *io_opts)
 {
 	if (io_opts->background_target &&
-	    !bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
-	    !ptr->cached)
+	    !bch2_dev_in_target(c, p.ptr.dev, io_opts->background_target) &&
+	    !p.ptr.cached)
 		return true;
 
 	if (io_opts->background_compression &&
-	    crc.compression_type !=
+	    p.crc.compression_type !=
 	    bch2_compression_opt_to_type[io_opts->background_compression])
 		return true;
@@ -39,8 +38,8 @@ void bch2_rebalance_add_key(struct bch_fs *c,
 			    struct bkey_s_c k,
 			    struct bch_io_opts *io_opts)
 {
-	const struct bch_extent_ptr *ptr;
-	struct bch_extent_crc_unpacked crc;
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
 	struct bkey_s_c_extent e;
 
 	if (!bkey_extent_is_data(k.k))
@@ -52,13 +51,13 @@ void bch2_rebalance_add_key(struct bch_fs *c,
 	e = bkey_s_c_to_extent(k);
 
-	extent_for_each_ptr_crc(e, ptr, crc)
-		if (rebalance_ptr_pred(c, ptr, crc, io_opts)) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+	extent_for_each_ptr_decode(e, p, entry)
+		if (rebalance_ptr_pred(c, p, io_opts)) {
+			struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
 
-			if (atomic64_add_return(crc.compressed_size,
+			if (atomic64_add_return(p.crc.compressed_size,
 						&ca->rebalance_work) ==
-			    crc.compressed_size)
+			    p.crc.compressed_size)
 				rebalance_wakeup(c);
 		}
 }
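
One detail in bch2_rebalance_add_key() worth spelling out: atomic64_add_return() returns the counter's new value, so comparing it against the amount just added is a lock-free way of detecting the zero -> nonzero transition. The rebalance thread is woken only when the first piece of work arrives, not on every key. A minimal sketch of the same pattern (generic, not bcachefs-specific; wake_up_worker() is a hypothetical hook):

/*
 * Wake a worker only on the empty -> non-empty transition: if the
 * counter was already nonzero, a previous caller has woken the worker
 * and it hasn't drained the pending work yet.
 */
static void add_work(atomic64_t *pending, u64 amount)
{
	if (atomic64_add_return(amount, pending) == amount)
		wake_up_worker();	/* hypothetical wakeup hook */
}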
@@ -76,16 +75,16 @@ static enum data_cmd rebalance_pred(struct bch_fs *c, void *arg,
 				    struct bch_io_opts *io_opts,
 				    struct data_opts *data_opts)
 {
-	const struct bch_extent_ptr *ptr;
-	struct bch_extent_crc_unpacked crc;
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
 
 	/* Make sure we have room to add a new pointer: */
 	if (bkey_val_u64s(e.k) + BKEY_EXTENT_PTR_U64s_MAX >
 	    BKEY_EXTENT_VAL_U64s_MAX)
 		return DATA_SKIP;
 
-	extent_for_each_ptr_crc(e, ptr, crc)
-		if (rebalance_ptr_pred(c, ptr, crc, io_opts))
+	extent_for_each_ptr_decode(e, p, entry)
+		if (rebalance_ptr_pred(c, p, io_opts))
 			goto found;
 
 	return DATA_SKIP;
...
@@ -283,19 +283,19 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
 	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
 		if (k.k->type == BCH_EXTENT) {
 			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
-			const struct bch_extent_ptr *ptr;
-			struct bch_extent_crc_unpacked crc;
+			const union bch_extent_entry *entry;
+			struct extent_ptr_decoded p;
 
-			extent_for_each_ptr_crc(e, ptr, crc) {
-				if (crc.compression_type == BCH_COMPRESSION_NONE) {
+			extent_for_each_ptr_decode(e, p, entry) {
+				if (p.crc.compression_type == BCH_COMPRESSION_NONE) {
 					nr_uncompressed_extents++;
 					uncompressed_sectors += e.k->size;
 				} else {
 					nr_compressed_extents++;
 					compressed_sectors_compressed +=
-						crc.compressed_size;
+						p.crc.compressed_size;
 					compressed_sectors_uncompressed +=
-						crc.uncompressed_size;
+						p.crc.uncompressed_size;
 				}
 
 				/* only looking at the first ptr */
...