Commit 18d46e76 authored by Linus Torvalds

Merge tag 'for-6.7-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "A few fixes and message updates:

   - for simple quotas, handle the case when a snapshot is created and
     the target qgroup already exists

   - fix a warning when file descriptor given to send ioctl is not
     writable

   - fix off-by-one condition when checking chunk maps

   - free pages when page array allocation fails during compression
     read, other cases were handled

   - fix memory leak on error handling path in ref-verify debugging
     feature

   - copy missing struct member 'version' in 64/32bit compat send ioctl

   - tree-checker verifies inline backref ordering

   - print messages to syslog on first mount and last unmount

   - update error messages when reading chunk maps"

* tag 'for-6.7-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: send: ensure send_fd is writable
  btrfs: free the allocated memory if btrfs_alloc_page_array() fails
  btrfs: fix 64bit compat send ioctl arguments not initializing version member
  btrfs: make error messages more clear when getting a chunk map
  btrfs: fix off-by-one when checking chunk map includes logical address
  btrfs: ref-verify: fix memory leaks in btrfs_ref_tree_mod()
  btrfs: add dmesg output for first mount and last unmount of a filesystem
  btrfs: do not abort transaction if there is already an existing qgroup
  btrfs: tree-checker: add type and sequence check for inline backrefs
parents df60cee2 0ac1d13a
@@ -3213,6 +3213,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices
 		goto fail_alloc;
 	}
 
+	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
 	/*
	 * Verify the type first, if that or the checksum value are
	 * corrupted, we'll find out
@@ -674,8 +674,8 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio)
  *		the array will be skipped
  *
  * Return: 0 if all pages were able to be allocated;
- *         -ENOMEM otherwise, and the caller is responsible for freeing all
- *         non-null page pointers in the array.
+ *         -ENOMEM otherwise, the partially allocated pages would be freed and
+ *         the array slots zeroed
  */
 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
 {
@@ -694,8 +694,13 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
 		 * though alloc_pages_bulk_array() falls back to alloc_page()
 		 * if it could not bulk-allocate. So we must be out of memory.
 		 */
-		if (allocated == last)
+		if (allocated == last) {
+			for (int i = 0; i < allocated; i++) {
+				__free_page(page_array[i]);
+				page_array[i] = NULL;
+			}
 			return -ENOMEM;
+		}
 
 		memalloc_retry_wait(GFP_NOFS);
 	}
@@ -4356,6 +4356,7 @@ static int _btrfs_ioctl_send(struct inode *inode, void __user *argp, bool compat)
 		arg->clone_sources = compat_ptr(args32.clone_sources);
 		arg->parent_root = args32.parent_root;
 		arg->flags = args32.flags;
+		arg->version = args32.version;
 		memcpy(arg->reserved, args32.reserved,
 		       sizeof(args32.reserved));
 #else
@@ -794,6 +794,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
 			dump_ref_action(fs_info, ra);
 			kfree(ref);
 			kfree(ra);
+			kfree(re);
 			goto out_unlock;
 		} else if (be->num_refs == 0) {
 			btrfs_err(fs_info,
@@ -803,6 +804,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
 			dump_ref_action(fs_info, ra);
 			kfree(ref);
 			kfree(ra);
+			kfree(re);
 			goto out_unlock;
 		}
@@ -8158,7 +8158,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
 	}
 
 	sctx->send_filp = fget(arg->send_fd);
-	if (!sctx->send_filp) {
+	if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
 		ret = -EBADF;
 		goto out;
 	}
@@ -80,7 +80,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data);
 
 static void btrfs_put_super(struct super_block *sb)
 {
-	close_ctree(btrfs_sb(sb));
+	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+	btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
+	close_ctree(fs_info);
 }
 
 enum {
@@ -1774,7 +1774,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	btrfs_release_path(path);
 
 	ret = btrfs_create_qgroup(trans, objectid);
-	if (ret) {
+	if (ret && ret != -EEXIST) {
 		btrfs_abort_transaction(trans, ret);
 		goto fail;
 	}
@@ -31,6 +31,7 @@
 #include "inode-item.h"
 #include "dir-item.h"
 #include "raid-stripe-tree.h"
+#include "extent-tree.h"
 
 /*
  * Error message should follow the following format:
@@ -1276,6 +1277,8 @@ static int check_extent_item(struct extent_buffer *leaf,
 	unsigned long ptr;	/* Current pointer inside inline refs */
 	unsigned long end;	/* Extent item end */
 	const u32 item_size = btrfs_item_size(leaf, slot);
+	u8 last_type = 0;
+	u64 last_seq = U64_MAX;
 	u64 flags;
 	u64 generation;
 	u64 total_refs;		/* Total refs in btrfs_extent_item */
@@ -1322,6 +1325,18 @@ static int check_extent_item(struct extent_buffer *leaf,
 	 * 2.2) Ref type specific data
 	 *      Either using btrfs_extent_inline_ref::offset, or specific
 	 *      data structure.
+	 *
+	 * All above inline items should follow the order:
+	 *
+	 * - All btrfs_extent_inline_ref::type should be in an ascending
+	 *   order
+	 *
+	 * - Within the same type, the items should follow a descending
+	 *   order by their sequence number. The sequence number is
+	 *   determined by:
+	 *   * btrfs_extent_inline_ref::offset for all types other than
+	 *     EXTENT_DATA_REF
+	 *   * hash_extent_data_ref() for EXTENT_DATA_REF
 	 */
 	if (unlikely(item_size < sizeof(*ei))) {
 		extent_err(leaf, slot,
@@ -1403,6 +1418,7 @@ static int check_extent_item(struct extent_buffer *leaf,
 		struct btrfs_extent_inline_ref *iref;
 		struct btrfs_extent_data_ref *dref;
 		struct btrfs_shared_data_ref *sref;
+		u64 seq;
 		u64 dref_offset;
 		u64 inline_offset;
 		u8 inline_type;
@@ -1416,6 +1432,7 @@ static int check_extent_item(struct extent_buffer *leaf,
 		iref = (struct btrfs_extent_inline_ref *)ptr;
 		inline_type = btrfs_extent_inline_ref_type(leaf, iref);
 		inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
+		seq = inline_offset;
 		if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
 			extent_err(leaf, slot,
 "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
@@ -1446,6 +1463,10 @@ static int check_extent_item(struct extent_buffer *leaf,
 		case BTRFS_EXTENT_DATA_REF_KEY:
 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
 			dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
+			seq = hash_extent_data_ref(
+					btrfs_extent_data_ref_root(leaf, dref),
+					btrfs_extent_data_ref_objectid(leaf, dref),
+					btrfs_extent_data_ref_offset(leaf, dref));
 			if (unlikely(!IS_ALIGNED(dref_offset,
 						 fs_info->sectorsize))) {
 				extent_err(leaf, slot,
@@ -1475,6 +1496,24 @@ static int check_extent_item(struct extent_buffer *leaf,
 				   inline_type);
 			return -EUCLEAN;
 		}
+		if (inline_type < last_type) {
+			extent_err(leaf, slot,
+				   "inline ref out-of-order: has type %u, prev type %u",
+				   inline_type, last_type);
+			return -EUCLEAN;
+		}
+		/* Type changed, allow the sequence starts from U64_MAX again. */
+		if (inline_type > last_type)
+			last_seq = U64_MAX;
+		if (seq > last_seq) {
+			extent_err(leaf, slot,
+"inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx",
+				   inline_type, inline_offset, seq,
+				   last_type, last_seq);
+			return -EUCLEAN;
+		}
+		last_type = inline_type;
+		last_seq = seq;
 		ptr += btrfs_extent_inline_ref_size(inline_type);
 	}
 	/* No padding is allowed */
@@ -3006,15 +3006,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
 	read_unlock(&em_tree->lock);
 
 	if (!em) {
-		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
+		btrfs_crit(fs_info,
+			   "unable to find chunk map for logical %llu length %llu",
 			   logical, length);
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (em->start > logical || em->start + em->len < logical) {
+	if (em->start > logical || em->start + em->len <= logical) {
 		btrfs_crit(fs_info,
-			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
-			   logical, length, em->start, em->start + em->len);
+			   "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
+			   logical, logical + length, em->start, em->start + em->len);
 		free_extent_map(em);
 		return ERR_PTR(-EINVAL);
 	}