Commit 66fcf74e authored by Linus Torvalds

Merge tag 'for-6.2-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - explicitly initialize zlib work memory to fix a KMSAN warning about
   use of uninitialized memory

 - limit number of send clones by maximum memory allocated

 - limit device extents to the device size in case a device shrink races
   with chunk allocation

 - raid56 fixes:
     - fix copy&paste error in RAID6 stripe recovery
     - make error bitmap update atomic

* tag 'for-6.2-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: raid56: make error_bitmap update atomic
  btrfs: send: limit number of clones and allocated memory size
  btrfs: zlib: zero-initialize zlib workspace
  btrfs: limit device extents to the device size
  btrfs: raid56: fix stripes if vertical errors are found
parents d2d11f34 a9ad4d87
fs/btrfs/raid56.c
@@ -1426,12 +1426,20 @@ static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
 	u32 bio_size = 0;
 	struct bio_vec *bvec;
 	struct bvec_iter_all iter_all;
+	int i;
 
 	bio_for_each_segment_all(bvec, bio, iter_all)
 		bio_size += bvec->bv_len;
 
-	bitmap_set(rbio->error_bitmap, total_sector_nr,
-		   bio_size >> rbio->bioc->fs_info->sectorsize_bits);
+	/*
+	 * Since we can have multiple bios touching the error_bitmap, we cannot
+	 * call bitmap_set() without protection.
+	 *
+	 * Instead use set_bit() for each bit, as set_bit() itself is atomic.
+	 */
+	for (i = total_sector_nr; i < total_sector_nr +
+	     (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
+		set_bit(i, rbio->error_bitmap);
 }
 
 /* Verify the data sectors at read time. */
@@ -1886,7 +1894,7 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
 		sector->uptodate = 1;
 	}
 	if (failb >= 0) {
-		ret = verify_one_sector(rbio, faila, sector_nr);
+		ret = verify_one_sector(rbio, failb, sector_nr);
 		if (ret < 0)
 			goto cleanup;
...
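The error_bitmap hunk matters because bitmap_set() is a plain read-modify-write on the bitmap's words: if two bios complete concurrently and their sector ranges share a word, each can load the old word and store back a value missing the other's bits. A per-bit set_bit() is an atomic RMW, so no update is lost, at the cost of one atomic operation per sector. A minimal userspace sketch of the difference, using C11 atomics rather than the kernel's bitops; the function names here are illustrative, not kernel API:

#include <limits.h>
#include <stdatomic.h>
#include <stddef.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Racy variant: plain read-modify-write, as bitmap_set() does per word.
 * Two threads setting different bits of the same word can lose an update. */
static void plain_set_bit(size_t nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Atomic variant: an atomic OR per bit, mirroring the guarantee the
 * kernel's set_bit() provides, so concurrent writers cannot clobber
 * each other's bits. */
static void atomic_set_bit(size_t nr, _Atomic unsigned long *map)
{
	atomic_fetch_or(&map[nr / BITS_PER_LONG], 1UL << (nr % BITS_PER_LONG));
}

The second hunk is the copy&paste fix from the pull message: the failb branch was re-verifying faila's sector, so the second failed stripe's recovered sector was never actually checked.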
fs/btrfs/send.c
@@ -8073,10 +8073,10 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
 	/*
 	 * Check that we don't overflow at later allocations, we request
 	 * clone_sources_count + 1 items, and compare to unsigned long inside
-	 * access_ok.
+	 * access_ok. Also set an upper limit for allocation size so this can't
+	 * easily exhaust memory. Max number of clone sources is about 200K.
 	 */
-	if (arg->clone_sources_count >
-	    ULONG_MAX / sizeof(struct clone_root) - 1) {
+	if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) {
 		ret = -EINVAL;
 		goto out;
 	}
...
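The old check only ruled out arithmetic overflow in the later access_ok() size computation; on 64-bit it still let userspace request an absurdly large clone_sources array, since the effective bound was near ULONG_MAX bytes. The new check caps the array the kernel will allocate at SZ_8M bytes. As a sanity check on the comment's "about 200K" figure, assuming sizeof(struct clone_root) is 40 bytes on a 64-bit build (a pointer plus three u64 fields plus a padded bool; the exact size is an assumption here):

	SZ_8M / sizeof(struct clone_root) = 8388608 / 40 = 209715 ≈ 200K clone sources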
fs/btrfs/volumes.c
@@ -1600,7 +1600,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
 	if (ret < 0)
 		goto out;
 
-	while (1) {
+	while (search_start < search_end) {
 		l = path->nodes[0];
 		slot = path->slots[0];
 		if (slot >= btrfs_header_nritems(l)) {
@@ -1623,6 +1623,9 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
 		if (key.type != BTRFS_DEV_EXTENT_KEY)
 			goto next;
 
+		if (key.offset > search_end)
+			break;
+
 		if (key.offset > search_start) {
 			hole_size = key.offset - search_start;
 			dev_extent_hole_check(device, &search_start, &hole_size,
@@ -1683,6 +1686,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
 	else
 		ret = 0;
 
+	ASSERT(max_hole_start + max_hole_size <= search_end);
 out:
 	btrfs_free_path(path);
 	*start = max_hole_start;
...
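All three hunks enforce one invariant: the search must never report a hole extending past search_end, even when a racing device shrink has moved the device size below extents still recorded in the tree. The loop now stops once the cursor reaches search_end, extents starting beyond it are skipped, and the new ASSERT documents the resulting post-condition (max_hole_start + max_hole_size <= search_end). A simplified, self-contained sketch of such a bounded hole search over a sorted extent array; the types and names are hypothetical, not the btree walk the kernel actually performs:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical flattened extent record, sorted by offset. */
struct extent {
	uint64_t offset;
	uint64_t len;
};

/*
 * Find the first hole of at least min_len within [search_start, search_end).
 * The two early exits mirror the fix: without them, a hole computed past the
 * last extent could run beyond a concurrently shrunken device size.
 */
static bool find_hole(const struct extent *ext, size_t n,
		      uint64_t search_start, uint64_t search_end,
		      uint64_t min_len, uint64_t *hole_start)
{
	for (size_t i = 0; i < n && search_start < search_end; i++) {
		if (ext[i].offset > search_end)
			break;
		if (ext[i].offset > search_start &&
		    ext[i].offset - search_start >= min_len) {
			*hole_start = search_start;
			return true;
		}
		/* Advance past this extent if it covers the cursor. */
		if (ext[i].offset + ext[i].len > search_start)
			search_start = ext[i].offset + ext[i].len;
	}
	/* Trailing hole, clamped to search_end rather than the extent list. */
	if (search_start < search_end && search_end - search_start >= min_len) {
		*hole_start = search_start;
		return true;
	}
	return false;
}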
fs/btrfs/zlib.c
@@ -63,7 +63,7 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
 
 	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
 			zlib_inflate_workspacesize());
-	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
+	workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL);
 	workspace->level = level;
 	workspace->buf = NULL;
 	/*
...
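kvzalloc() is the zeroing counterpart of kvmalloc(): it passes __GFP_ZERO, so the workspace arrives fully zeroed instead of carrying stale heap contents that zlib's match-finding code would otherwise read, which is what the sanitizer flagged as a use of uninitialized memory. In current kernels the helper is essentially this (paraphrased from include/linux/slab.h):

static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

Since btrfs caches and reuses compression workspaces, the extra zeroing cost is paid only when a workspace is first allocated, not on every compress or decompress call.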