Commit dcdfd9cc authored by Linus Torvalds

Merge tag 'for-5.9-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "Two small fixes and a bunch of lockdep fixes for warnings that show up
  with an upcoming tree locking update but are valid with current locks
  as well"

* tag 'for-5.9-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: tree-checker: fix the error message for transid error
  btrfs: set the lockdep class for log tree extent buffers
  btrfs: set the correct lockdep class for new nodes
  btrfs: allocate scrub workqueues outside of locks
  btrfs: fix potential deadlock in the search ioctl
  btrfs: drop path before adding new uuid tree entry
  btrfs: block-group: fix free-space bitmap threshold
parents b765a32a f96d6960

fs/btrfs/block-group.c

@@ -1798,7 +1798,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
 	cache->fs_info = fs_info;
 	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
-	set_free_space_tree_thresholds(cache);
 	cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
@@ -1912,6 +1911,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
 	if (ret < 0)
 		goto error;
+
+	set_free_space_tree_thresholds(cache);
 	if (need_clear) {
 		/*
 		 * When we mount with old space cache, we need to
@@ -2132,6 +2133,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 		return -ENOMEM;
 	cache->length = size;
+	set_free_space_tree_thresholds(cache);
 	cache->used = bytes_used;
 	cache->flags = type;
 	cache->last_byte_to_unpin = (u64)-1;

fs/btrfs/ctree.c

@@ -1297,6 +1297,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 		btrfs_tree_read_unlock_blocking(eb);
 		free_extent_buffer(eb);
+	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
+				       eb_rewin, btrfs_header_level(eb_rewin));
 	btrfs_tree_read_lock(eb_rewin);
 	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
 	WARN_ON(btrfs_header_nritems(eb_rewin) >
@@ -1370,7 +1372,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 	if (!eb)
 		return NULL;
-	btrfs_tree_read_lock(eb);
 	if (old_root) {
 		btrfs_set_header_bytenr(eb, eb->start);
 		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
@@ -1378,6 +1379,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		btrfs_set_header_level(eb, old_root->level);
 		btrfs_set_header_generation(eb, old_generation);
 	}
+	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
+				       btrfs_header_level(eb));
+	btrfs_tree_read_lock(eb);
 	if (tm)
 		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
 	else

fs/btrfs/extent-tree.c

@@ -4527,7 +4527,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		return ERR_PTR(-EUCLEAN);
 	}
-	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
+	btrfs_set_buffer_lockdep_class(owner, buf, level);
 	btrfs_tree_lock(buf);
 	btrfs_clean_tree_block(buf);
 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

fs/btrfs/extent_io.c

@@ -5655,9 +5655,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
 	}
 }
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
-			       void __user *dstv,
-			       unsigned long start, unsigned long len)
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+				       void __user *dstv,
+				       unsigned long start, unsigned long len)
 {
 	size_t cur;
 	size_t offset;
@@ -5677,7 +5677,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
 		cur = min(len, (PAGE_SIZE - offset));
 		kaddr = page_address(page);
-		if (copy_to_user(dst, kaddr + offset, cur)) {
+		if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
 			ret = -EFAULT;
 			break;
 		}

fs/btrfs/extent_io.h

@@ -241,9 +241,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
 void read_extent_buffer(const struct extent_buffer *eb, void *dst,
 			unsigned long start,
 			unsigned long len);
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
-			       void __user *dst, unsigned long start,
-			       unsigned long len);
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+				       void __user *dst, unsigned long start,
+				       unsigned long len);
 void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
 void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
 					 const void *src);

fs/btrfs/free-space-tree.c

@@ -22,6 +22,10 @@ void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
 	size_t bitmap_size;
 	u64 num_bitmaps, total_bitmap_size;
+	if (WARN_ON(cache->length == 0))
+		btrfs_warn(cache->fs_info, "block group %llu length is zero",
+			   cache->start);
+
 	/*
 	 * We convert to bitmaps when the disk space required for using extents
 	 * exceeds that required for using bitmaps.
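
The threshold math in this function only makes sense once cache->length is known, which is why the three call sites in block-group.c above move the call to after the length is set. A rough user-space sketch of the extents-vs-bitmaps break-even idea; the constants here are illustrative, not the kernel's exact values:

/* Sketch of the break-even computed by set_free_space_tree_thresholds().
 * Everything scales with the block group length, so computing it while
 * the length is still zero (the bug being fixed) degenerates to 0. */
#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE	4096ULL			/* assumed sector size */
#define BITMAP_BITS	(4096ULL * 8)		/* bits per bitmap item (assumed) */
#define EXTENT_COST	24ULL			/* bytes per extent entry (assumed) */

static uint64_t extents_threshold(uint64_t length)
{
	uint64_t bitmap_range = SECTORSIZE * BITMAP_BITS; /* bytes covered per bitmap */
	uint64_t num_bitmaps = (length + bitmap_range - 1) / bitmap_range;
	uint64_t total_bitmap_bytes = num_bitmaps * (BITMAP_BITS / 8);

	/* Convert to bitmaps once extent entries would cost more space. */
	return total_bitmap_bytes / EXTENT_COST;
}

int main(void)
{
	printf("1GiB group: convert after %llu extents\n",
	       (unsigned long long)extents_threshold(1ULL << 30));
	/* A zero-length group yields a threshold of 0, so the bitmap
	 * logic never engages sensibly. */
	printf("zero-length group: %llu\n",
	       (unsigned long long)extents_threshold(0));
	return 0;
}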

fs/btrfs/ioctl.c

@@ -2086,9 +2086,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
 		sh.len = item_len;
 		sh.transid = found_transid;
-		/* copy search result header */
-		if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
-			ret = -EFAULT;
+		/*
+		 * Copy search result header. If we fault then loop again so we
+		 * can fault in the pages and -EFAULT there if there's a
+		 * problem. Otherwise we'll fault and then copy the buffer in
+		 * properly this next time through.
+		 */
+		if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
+			ret = 0;
 			goto out;
 		}
@@ -2096,10 +2101,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
 		if (item_len) {
 			char __user *up = ubuf + *sk_offset;
-			/* copy the item */
-			if (read_extent_buffer_to_user(leaf, up,
-						       item_off, item_len)) {
-				ret = -EFAULT;
+			/*
+			 * Copy the item, same behavior as above, but reset the
+			 * sk_offset so we copy the full thing again.
+			 */
+			if (read_extent_buffer_to_user_nofault(leaf, up,
+						item_off, item_len)) {
+				ret = 0;
+				*sk_offset -= sizeof(sh);
 				goto out;
 			}
@@ -2184,6 +2193,10 @@ static noinline int search_ioctl(struct inode *inode,
 	key.offset = sk->min_offset;
 	while (1) {
+		ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
+		if (ret)
+			break;
+
 		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
 		if (ret != 0) {
 			if (ret > 0)
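
For reference, the kernel path patched above is driven from user space via BTRFS_IOC_TREE_SEARCH; the fault_in_pages_writeable() call pre-faults the caller's buffer so the nofault copies can run without page faults while tree locks are held. A minimal caller, assuming root privileges and a path on a btrfs filesystem (error handling abbreviated):

/* Ask one BTRFS_IOC_TREE_SEARCH call for up to 16 items from the root
 * tree and report how many came back. Results land in args.buf. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>

int main(int argc, char **argv)
{
	struct btrfs_ioctl_search_args args;
	int fd = open(argc > 1 ? argv[1] : "/", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&args, 0, sizeof(args));
	args.key.tree_id = BTRFS_ROOT_TREE_OBJECTID;	/* search the root tree */
	args.key.max_objectid = (__u64)-1;		/* leave upper bounds open */
	args.key.max_offset = (__u64)-1;
	args.key.max_transid = (__u64)-1;
	args.key.max_type = (__u32)-1;
	args.key.nr_items = 16;				/* in: wanted, out: returned */

	if (ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args) < 0) {
		perror("BTRFS_IOC_TREE_SEARCH");
		return 1;
	}
	printf("got %u items\n", args.key.nr_items);
	return 0;
}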

fs/btrfs/scrub.c

@@ -3716,50 +3716,84 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
 	return 0;
 }
+static void scrub_workers_put(struct btrfs_fs_info *fs_info)
+{
+	if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
+					&fs_info->scrub_lock)) {
+		struct btrfs_workqueue *scrub_workers = NULL;
+		struct btrfs_workqueue *scrub_wr_comp = NULL;
+		struct btrfs_workqueue *scrub_parity = NULL;
+
+		scrub_workers = fs_info->scrub_workers;
+		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
+		scrub_parity = fs_info->scrub_parity_workers;
+
+		fs_info->scrub_workers = NULL;
+		fs_info->scrub_wr_completion_workers = NULL;
+		fs_info->scrub_parity_workers = NULL;
+		mutex_unlock(&fs_info->scrub_lock);
+
+		btrfs_destroy_workqueue(scrub_workers);
+		btrfs_destroy_workqueue(scrub_wr_comp);
+		btrfs_destroy_workqueue(scrub_parity);
+	}
+}
+
 /*
  * get a reference count on fs_info->scrub_workers. start worker if necessary
  */
 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 						int is_dev_replace)
 {
+	struct btrfs_workqueue *scrub_workers = NULL;
+	struct btrfs_workqueue *scrub_wr_comp = NULL;
+	struct btrfs_workqueue *scrub_parity = NULL;
 	unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
 	int max_active = fs_info->thread_pool_size;
+	int ret = -ENOMEM;
-	lockdep_assert_held(&fs_info->scrub_lock);
+	if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
+		return 0;
-	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
-		ASSERT(fs_info->scrub_workers == NULL);
-		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
-				flags, is_dev_replace ? 1 : max_active, 4);
-		if (!fs_info->scrub_workers)
-			goto fail_scrub_workers;
-
-		ASSERT(fs_info->scrub_wr_completion_workers == NULL);
-		fs_info->scrub_wr_completion_workers =
-			btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
-					      max_active, 2);
-		if (!fs_info->scrub_wr_completion_workers)
-			goto fail_scrub_wr_completion_workers;
+	scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
+					      is_dev_replace ? 1 : max_active, 4);
+	if (!scrub_workers)
+		goto fail_scrub_workers;
-		ASSERT(fs_info->scrub_parity_workers == NULL);
-		fs_info->scrub_parity_workers =
-			btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
-					      max_active, 2);
-		if (!fs_info->scrub_parity_workers)
-			goto fail_scrub_parity_workers;
+	scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
+					      max_active, 2);
+	if (!scrub_wr_comp)
+		goto fail_scrub_wr_completion_workers;
+	scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
+					     max_active, 2);
+	if (!scrub_parity)
+		goto fail_scrub_parity_workers;
+
+	mutex_lock(&fs_info->scrub_lock);
+	if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
+		ASSERT(fs_info->scrub_workers == NULL &&
+		       fs_info->scrub_wr_completion_workers == NULL &&
+		       fs_info->scrub_parity_workers == NULL);
+		fs_info->scrub_workers = scrub_workers;
+		fs_info->scrub_wr_completion_workers = scrub_wr_comp;
+		fs_info->scrub_parity_workers = scrub_parity;
 		refcount_set(&fs_info->scrub_workers_refcnt, 1);
-	} else {
-		refcount_inc(&fs_info->scrub_workers_refcnt);
+		mutex_unlock(&fs_info->scrub_lock);
+		return 0;
 	}
-	return 0;
+	/* Other thread raced in and created the workers for us */
+	refcount_inc(&fs_info->scrub_workers_refcnt);
+	mutex_unlock(&fs_info->scrub_lock);
+	ret = 0;
+	btrfs_destroy_workqueue(scrub_parity);
 fail_scrub_parity_workers:
-	btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
+	btrfs_destroy_workqueue(scrub_wr_comp);
 fail_scrub_wr_completion_workers:
-	btrfs_destroy_workqueue(fs_info->scrub_workers);
+	btrfs_destroy_workqueue(scrub_workers);
 fail_scrub_workers:
-	return -ENOMEM;
+	return ret;
 }
 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
@@ -3770,9 +3804,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	int ret;
 	struct btrfs_device *dev;
 	unsigned int nofs_flag;
-	struct btrfs_workqueue *scrub_workers = NULL;
-	struct btrfs_workqueue *scrub_wr_comp = NULL;
-	struct btrfs_workqueue *scrub_parity = NULL;
 	if (btrfs_fs_closing(fs_info))
 		return -EAGAIN;
@@ -3819,13 +3850,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	if (IS_ERR(sctx))
 		return PTR_ERR(sctx);
+	ret = scrub_workers_get(fs_info, is_dev_replace);
+	if (ret)
+		goto out_free_ctx;
+
 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
 	if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
 		     !is_dev_replace)) {
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 		ret = -ENODEV;
-		goto out_free_ctx;
+		goto out;
 	}
 	if (!is_dev_replace && !readonly &&
@@ -3834,7 +3869,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
 				 rcu_str_deref(dev->name));
 		ret = -EROFS;
-		goto out_free_ctx;
+		goto out;
 	}
 	mutex_lock(&fs_info->scrub_lock);
@@ -3843,7 +3878,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 		ret = -EIO;
-		goto out_free_ctx;
+		goto out;
 	}
 	down_read(&fs_info->dev_replace.rwsem);
@@ -3854,17 +3889,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 		ret = -EINPROGRESS;
-		goto out_free_ctx;
+		goto out;
 	}
 	up_read(&fs_info->dev_replace.rwsem);
-	ret = scrub_workers_get(fs_info, is_dev_replace);
-	if (ret) {
-		mutex_unlock(&fs_info->scrub_lock);
-		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		goto out_free_ctx;
-	}
-
 	sctx->readonly = readonly;
 	dev->scrub_ctx = sctx;
 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -3917,24 +3945,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	mutex_lock(&fs_info->scrub_lock);
 	dev->scrub_ctx = NULL;
-	if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
-		scrub_workers = fs_info->scrub_workers;
-		scrub_wr_comp = fs_info->scrub_wr_completion_workers;
-		scrub_parity = fs_info->scrub_parity_workers;
-
-		fs_info->scrub_workers = NULL;
-		fs_info->scrub_wr_completion_workers = NULL;
-		fs_info->scrub_parity_workers = NULL;
-	}
 	mutex_unlock(&fs_info->scrub_lock);
-	btrfs_destroy_workqueue(scrub_workers);
-	btrfs_destroy_workqueue(scrub_wr_comp);
-	btrfs_destroy_workqueue(scrub_parity);
+	scrub_workers_put(fs_info);
 	scrub_put_ctx(sctx);
 	return ret;
+out:
+	scrub_workers_put(fs_info);
 out_free_ctx:
 	scrub_free_ctx(sctx);
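
The restructuring above moves the workqueue allocations out from under scrub_lock and makes teardown symmetric via scrub_workers_put(). A user-space sketch of the same "allocate unlocked, publish under the lock, loser of the race frees its copy" pattern; names and the malloc stand-in are illustrative, not kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int refcnt;	/* active users of the shared workers */
static void *workers;		/* stand-in for the three workqueues */

static int workers_get(void)
{
	int old = atomic_load(&refcnt);
	void *mine;

	/* Fast path, like refcount_inc_not_zero(): ref an existing setup. */
	while (old != 0)
		if (atomic_compare_exchange_weak(&refcnt, &old, old + 1))
			return 0;

	mine = malloc(4096);	/* the allocation now happens with no locks held */
	if (!mine)
		return -1;

	pthread_mutex_lock(&lock);
	if (atomic_load(&refcnt) == 0) {
		workers = mine;			/* we won the race: publish */
		atomic_store(&refcnt, 1);
		mine = NULL;
	} else {
		atomic_fetch_add(&refcnt, 1);	/* other thread raced in */
	}
	pthread_mutex_unlock(&lock);
	free(mine);	/* no-op for the winner, cleanup for the loser */
	return 0;
}

static void workers_put(void)
{
	/* The kernel uses refcount_dec_and_mutex_lock(), which only takes
	 * the mutex when dropping the last reference; plain lock/dec here. */
	pthread_mutex_lock(&lock);
	if (atomic_fetch_sub(&refcnt, 1) == 1) {
		void *victim = workers;

		workers = NULL;
		pthread_mutex_unlock(&lock);
		free(victim);	/* destroy outside the lock, as above */
		return;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	if (workers_get() == 0)
		workers_put();
	return 0;
}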

fs/btrfs/tree-checker.c

@@ -984,7 +984,7 @@ static int check_inode_item(struct extent_buffer *leaf,
 	/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
 	if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
 		inode_item_err(leaf, slot,
-			       "invalid inode generation: has %llu expect [0, %llu]",
+			       "invalid inode transid: has %llu expect [0, %llu]",
 			       btrfs_inode_transid(leaf, iitem), super_gen + 1);
 		return -EUCLEAN;
 	}

fs/btrfs/volumes.c

@@ -4462,6 +4462,7 @@ int btrfs_uuid_scan_kthread(void *data)
 			goto skip;
 		}
 update_tree:
+		btrfs_release_path(path);
 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
 			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
 						  BTRFS_UUID_KEY_SUBVOL,
@@ -4486,6 +4487,7 @@ int btrfs_uuid_scan_kthread(void *data)
 		}
 skip:
+		btrfs_release_path(path);
 		if (trans) {
 			ret = btrfs_end_transaction(trans);
 			trans = NULL;
@@ -4493,7 +4495,6 @@ int btrfs_uuid_scan_kthread(void *data)
 				break;
 		}
-		btrfs_release_path(path);
 		if (key.offset < (u64)-1) {
 			key.offset++;
 		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
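
Releasing the path before btrfs_uuid_tree_add() means the kthread never holds locks on one tree while it goes to work on another. A toy pthread analog of why that ordering matters; the names are illustrative and this is a sketch, not the btrfs locking model:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t tree_b = PTHREAD_MUTEX_INITIALIZER;

static void *scan_thread(void *arg)
{
	int uuid;

	(void)arg;
	pthread_mutex_lock(&tree_a);
	uuid = 42;			/* copy the item out under the lock */
	pthread_mutex_unlock(&tree_a);	/* drop A first, like btrfs_release_path */

	pthread_mutex_lock(&tree_b);	/* only now touch the other tree */
	printf("inserted %d into B\n", uuid);
	pthread_mutex_unlock(&tree_b);
	return NULL;
}

static void *other_thread(void *arg)
{
	(void)arg;
	/* Locks B then A: safe only because scan_thread never holds both;
	 * holding A across the insert would make this an ABBA deadlock. */
	pthread_mutex_lock(&tree_b);
	pthread_mutex_lock(&tree_a);
	pthread_mutex_unlock(&tree_a);
	pthread_mutex_unlock(&tree_b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, scan_thread, NULL);
	pthread_create(&t2, NULL, other_thread, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}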