1. 14 Oct, 2015 1 commit
    • Btrfs: fix file corruption and data loss after cloning inline extents · 8039d87d
      Filipe Manana authored
      Currently the clone ioctl allows cloning an inline extent from one file
      to another that already has other (non-inlined) extents. This is a
      problem because btrfs is not designed to deal with files that mix inline
      and regular extents: if a file has an inline extent, it must be the only
      extent in the file and it must start at file offset 0. Having a file
      with an inline extent followed by regular extents results in EIO errors
      when doing reads or writes against the first 4K of the file.
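
      Whether a file's data is stored inline can be checked through FIEMAP,
      for example with filefrag (a quick sketch, assuming a btrfs filesystem
      mounted at the placeholder path /mnt and the default max_inline mount
      option; inline extents show up with the "inline" flag):

        # Small file, expected to be stored as a single inline extent.
        xfs_io -f -c "pwrite -S 0xbb 0 50" /mnt/small
        # Large enough to be stored in a regular (non-inline) extent.
        xfs_io -f -c "pwrite -S 0xaa 0 16K" /mnt/big
        # Flush delalloc so FIEMAP reports the final on-disk layout.
        sync
        filefrag -v /mnt/small   # flags column should include "inline"
        filefrag -v /mnt/big     # regular extent, no "inline" flag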
      
      Also, the clone ioctl allows one to lose data if the source file consists
      of a single inline extent, with a size of N bytes, and the destination
      file consists of a single inline extent with a size of M bytes, where we
      have M > N. In this case the clone operation removes the inline extent
      from the destination file and then copies the inline extent from the
      source file into the destination file - we lose the M - N bytes from the
      destination file, and a read operation will get the value 0x00 for any
      bytes in the range [N, M[ (the destination inode's i_size remains M,
      which is why we can read past N bytes).
      
      So fix this by not allowing such destructive operations to happen and
      returning errno EOPNOTSUPP to user space instead.
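
      From user space a rejected clone simply fails with "Operation not
      supported". A minimal sketch of checking for that, reusing the cloner
      helper and variables from the test below (and assuming the helper exits
      non-zero when the ioctl fails):

        # bar holds a single 50 byte inline extent, foo has a regular extent.
        $XFS_IO_PROG -f -c "pwrite -S 0xbb 0 50" $SCRATCH_MNT/bar
        $XFS_IO_PROG -f -c "pwrite -S 0xaa 0 16K" $SCRATCH_MNT/foo
        # With the fix, the clone range ioctl fails with EOPNOTSUPP.
        if ! $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo; then
            echo "clone of the inline extent was rejected, as expected"
        fi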
      
      Currently the fstest btrfs/035 exercises the data loss case but ignores
      it entirely - i.e. it expects the operation to succeed and does not check
      that we got data loss.
      
      The following test case for fstests exercises all these cases that result
      in file corruption and data loss:
      
        seq=`basename $0`
        seqres=$RESULT_DIR/$seq
        echo "QA output created by $seq"
        tmp=/tmp/$$
        status=1	# failure is the default!
        trap "_cleanup; exit \$status" 0 1 2 3 15
      
        _cleanup()
        {
            rm -f $tmp.*
        }
      
        # get standard environment, filters and checks
        . ./common/rc
        . ./common/filter
      
        # real QA test starts here
        _need_to_be_root
        _supported_fs btrfs
        _supported_os Linux
        _require_scratch
        _require_cloner
        _require_btrfs_fs_feature "no_holes"
        _require_btrfs_mkfs_feature "no-holes"
      
        rm -f $seqres.full
      
        test_cloning_inline_extents()
        {
            local mkfs_opts=$1
            local mount_opts=$2
      
            _scratch_mkfs $mkfs_opts >>$seqres.full 2>&1
            _scratch_mount $mount_opts
      
            # File bar, the source for all the following clone operations, consists
            # of a single inline extent (50 bytes).
            $XFS_IO_PROG -f -c "pwrite -S 0xbb 0 50" $SCRATCH_MNT/bar \
                | _filter_xfs_io
      
            # Test cloning into a file with an extent (non-inlined) where the
            # destination offset overlaps that extent. It should not be possible to
            # clone the inline extent from file bar into this file.
            $XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 16K" $SCRATCH_MNT/foo \
                | _filter_xfs_io
            $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo
      
            # Doing IO against any range in the first 4K of the file should work.
            # Due to a past clone ioctl bug which allowed cloning the inline extent,
            # these operations resulted in EIO errors.
            echo "File foo data after clone operation:"
            # All bytes should have the value 0xaa (clone operation failed and did
            # not modify our file).
            od -t x1 $SCRATCH_MNT/foo
            $XFS_IO_PROG -c "pwrite -S 0xcc 0 100" $SCRATCH_MNT/foo | _filter_xfs_io
      
            # Test cloning the inline extent against a file which has a hole in its
            # first 4K followed by a non-inlined extent. It should not be possible
            # as well to clone the inline extent from file bar into this file.
            $XFS_IO_PROG -f -c "pwrite -S 0xdd 4K 12K" $SCRATCH_MNT/foo2 \
                | _filter_xfs_io
            $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo2
      
            # Doing IO against any range in the first 4K of the file should work.
            # Due to a past clone ioctl bug which allowed cloning the inline extent,
            # these operations resulted in EIO errors.
            echo "File foo2 data after clone operation:"
            # All bytes should have the value 0x00 (clone operation failed and did
            # not modify our file).
            od -t x1 $SCRATCH_MNT/foo2
            $XFS_IO_PROG -c "pwrite -S 0xee 0 90" $SCRATCH_MNT/foo2 | _filter_xfs_io
      
            # Test cloning the inline extent against a file which has a size of zero
            # but has a prealloc extent. It should not be possible as well to clone
            # the inline extent from file bar into this file.
            $XFS_IO_PROG -f -c "falloc -k 0 1M" $SCRATCH_MNT/foo3 | _filter_xfs_io
            $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo3
      
            # Doing IO against any range in the first 4K of the file should work.
            # Due to a past clone ioctl bug which allowed cloning the inline extent,
            # these operations resulted in EIO errors.
            echo "First 50 bytes of foo3 after clone operation:"
            # Should not be able to read any bytes, file has 0 bytes i_size (the
            # clone operation failed and did not modify our file).
            od -t x1 $SCRATCH_MNT/foo3
            $XFS_IO_PROG -c "pwrite -S 0xff 0 90" $SCRATCH_MNT/foo3 | _filter_xfs_io
      
            # Test cloning the inline extent against a file which consists of a
            # single inline extent that has a size not greater than the size of
            # bar's inline extent (40 < 50).
            # It should be possible to do the extent cloning from bar to this file.
            $XFS_IO_PROG -f -c "pwrite -S 0x01 0 40" $SCRATCH_MNT/foo4 \
                | _filter_xfs_io
            $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo4
      
            # Doing IO against any range in the first 4K of the file should work.
            echo "File foo4 data after clone operation:"
            # Must match file bar's content.
            od -t x1 $SCRATCH_MNT/foo4
            $XFS_IO_PROG -c "pwrite -S 0x02 0 90" $SCRATCH_MNT/foo4 | _filter_xfs_io
      
            # Test cloning the inline extent against a file which consists of a
            # single inline extent that has a size greater than the size of bar's
            # inline extent (60 > 50).
            # It should not be possible to clone the inline extent from file bar
            # into this file.
            $XFS_IO_PROG -f -c "pwrite -S 0x03 0 60" $SCRATCH_MNT/foo5 \
                | _filter_xfs_io
            $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo5
      
            # Reading the file should not fail.
            echo "File foo5 data after clone operation:"
            # Must have a size of 60 bytes, with all bytes having a value of 0x03
            # (the clone operation failed and did not modify our file).
            od -t x1 $SCRATCH_MNT/foo5
      
            # Test cloning the inline extent against a file which has no extents but
            # has a size greater than bar's inline extent (16K > 50).
            # It should not be possible to clone the inline extent from file bar
            # into this file.
            $XFS_IO_PROG -f -c "truncate 16K" $SCRATCH_MNT/foo6 | _filter_xfs_io
            $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo6
      
            # Reading the file should not fail.
            echo "File foo6 data after clone operation:"
            # Must have a size of 16K, with all bytes having a value of 0x00 (the
            # clone operation failed and did not modify our file).
            od -t x1 $SCRATCH_MNT/foo6
      
            # Test cloning the inline extent against a file which has no extents but
            # has a size not greater than bar's inline extent (30 < 50).
            # It should be possible to clone the inline extent from file bar into
            # this file.
            $XFS_IO_PROG -f -c "truncate 30" $SCRATCH_MNT/foo7 | _filter_xfs_io
            $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo7
      
            # Reading the file should not fail.
            echo "File foo7 data after clone operation:"
            # Must have a size of 50 bytes, with all bytes having a value of 0xbb.
            od -t x1 $SCRATCH_MNT/foo7
      
            # Test cloning the inline extent against a file which has a size not
            # greater than the size of bar's inline extent (20 < 50) but has
            # a prealloc extent that goes beyond the file's size. It should not be
            # possible to clone the inline extent from bar into this file.
            $XFS_IO_PROG -f -c "falloc -k 0 1M" \
                            -c "pwrite -S 0x88 0 20" \
                            $SCRATCH_MNT/foo8 | _filter_xfs_io
            $CLONER_PROG -s 0 -d 0 -l 0 $SCRATCH_MNT/bar $SCRATCH_MNT/foo8
      
            echo "File foo8 data after clone operation:"
            # Must have a size of 20 bytes, with all bytes having a value of 0x88
            # (the clone operation did not modify our file).
            od -t x1 $SCRATCH_MNT/foo8
      
            _scratch_unmount
        }
      
        echo -e "\nTesting without compression and without the no-holes feature...\n"
        test_cloning_inline_extents
      
        echo -e "\nTesting with compression and without the no-holes feature...\n"
        test_cloning_inline_extents "" "-o compress"
      
        echo -e "\nTesting without compression and with the no-holes feature...\n"
        test_cloning_inline_extents "-O no-holes" ""
      
        echo -e "\nTesting with compression and with the no-holes feature...\n"
        test_cloning_inline_extents "-O no-holes" "-o compress"
      
        status=0
        exit
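
      The script is meant to be run from an fstests checkout, installed
      together with a matching golden output file as a new test under
      tests/btrfs/ (NNN below is only a placeholder test number):

        ./check btrfs/NNN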
      
      Cc: stable@vger.kernel.org
      Signed-off-by: Filipe Manana <fdmanana@suse.com>
  2. 13 Oct, 2015 2 commits
    • btrfs: fix resending received snapshot with parent · b96b1db0
      Robin Ruede authored
      This fixes a regression introduced by 37b8d27d between v4.1 and v4.2.
      
      When a snapshot is received, its received_uuid is set to the original
      uuid of the subvolume. When that snapshot is then resent to a third
      filesystem, its received_uuid is set to the second snapshot's uuid
      instead of the original one. The same was true for the parent_uuid.
      This behaviour was partially changed in 37b8d27d, but in that patch
      only the parent_uuid was taken from the real original, not the uuid
      itself, causing the search for the parent to fail in the case below.
      
      This happens for example when trying to send a series of linked
      snapshots (e.g. created by snapper) from the backup file system back
      to the original one.
      
      The following commands reproduce the issue on v4.2.1 (there is no error
      on v4.1.6):
      
          # setup three test file systems
          for i in 1 2 3; do
              truncate -s 50M fs$i
              mkfs.btrfs fs$i
              mkdir $i
              mount fs$i $i
          done
          echo "content" > 1/testfile
          btrfs su snapshot -r 1/ 1/snap1
          echo "changed content" > 1/testfile
          btrfs su snapshot -r 1/ 1/snap2
      
          # works fine:
          btrfs send 1/snap1 | btrfs receive 2/
          btrfs send -p 1/snap1 1/snap2 | btrfs receive 2/
      
          # ERROR: could not find parent subvolume
          btrfs send 2/snap1 | btrfs receive 3/
          btrfs send -p 2/snap1 2/snap2 | btrfs receive 3/
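
      The uuids recorded on each filesystem can be inspected with btrfs-progs
      to see where the chain breaks (a sketch using the mount points created
      above; look at the "UUID", "Parent UUID" and "Received UUID" fields
      printed by recent versions of btrfs-progs):

          # 2/snap1's received_uuid should be snap1's original uuid from
          # filesystem 1. The incremental send advertises that uuid as the
          # parent, so the receiver on filesystem 3 must be able to find a
          # snapshot carrying it.
          btrfs subvolume show 1/snap1 | grep -i uuid
          btrfs subvolume show 2/snap1 | grep -i uuid
          btrfs subvolume show 3/snap1 | grep -i uuid
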
      Signed-off-by: Robin Ruede <rruede+git@gmail.com>
      Fixes: 37b8d27d ("Btrfs: use received_uuid of parent during send")
      Cc: stable@vger.kernel.org # v4.2+
      Reviewed-by: Filipe Manana <fdmanana@suse.com>
      Tested-by: Ed Tomlinson <edt@aei.ca>
    • Btrfs: send, fix file corruption due to incorrect cloning operations · d906d49f
      Filipe Manana authored
      If we have a file that shares an extent with other files, then when
      processing the file extent item that refers to that shared extent, we
      blindly issue a clone operation with a length matching the length in the
      extent item and with a source range taken from some other file the
      receiver already has that points to the same extent. However, that range
      in the other file might not point exclusively to the shared extent, so
      using that length results in the receiver getting a file with data
      different from the one in the send snapshot. This issue happened both
      for incremental and full send operations.
      
      So fix this by issuing clone operations with lengths that don't cover
      regions of the source file that point to different extents (or have holes).
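
      Which ranges of the two files still reference the shared extent can be
      seen through FIEMAP, e.g. with filefrag (a sketch using the file names
      from the test below; recent kernels report sharing via the FIEMAP
      "shared" flag):

        # Flush the overwrites and the hole punch to disk first.
        sync
        # Extents still referenced by both foo and bar carry the "shared"
        # flag; the ranges of foo that were rewritten or punched out do not.
        filefrag -v $SCRATCH_MNT/foo
        filefrag -v $SCRATCH_MNT/bar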
      
      The following test case for fstests reproduces the problem.
      
        seq=`basename $0`
        seqres=$RESULT_DIR/$seq
        echo "QA output created by $seq"
      
        tmp=/tmp/$$
        status=1	# failure is the default!
        trap "_cleanup; exit \$status" 0 1 2 3 15
      
        _cleanup()
        {
            rm -fr $send_files_dir
            rm -f $tmp.*
        }
      
        # get standard environment, filters and checks
        . ./common/rc
        . ./common/filter
      
        # real QA test starts here
        _supported_fs btrfs
        _supported_os Linux
        _require_scratch
        _need_to_be_root
        _require_cp_reflink
        _require_xfs_io_command "fpunch"
      
        send_files_dir=$TEST_DIR/btrfs-test-$seq
      
        rm -f $seqres.full
        rm -fr $send_files_dir
        mkdir $send_files_dir
      
        _scratch_mkfs >>$seqres.full 2>&1
        _scratch_mount
      
        # Create our test file with a single 100K extent.
        $XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 100K" \
           $SCRATCH_MNT/foo | _filter_xfs_io
      
        # Clone our file into a new file named bar.
        cp --reflink=always $SCRATCH_MNT/foo $SCRATCH_MNT/bar
      
        # Now overwrite parts of our foo file.
        $XFS_IO_PROG -c "pwrite -S 0xbb 50K 10K" \
           -c "pwrite -S 0xcc 90K 10K" \
           -c "fpunch 70K 10k" \
           $SCRATCH_MNT/foo | _filter_xfs_io
      
        _run_btrfs_util_prog subvolume snapshot -r $SCRATCH_MNT \
           $SCRATCH_MNT/snap
      
        echo "File digests in the original filesystem:"
        md5sum $SCRATCH_MNT/snap/foo | _filter_scratch
        md5sum $SCRATCH_MNT/snap/bar | _filter_scratch
      
        _run_btrfs_util_prog send $SCRATCH_MNT/snap -f $send_files_dir/1.snap
      
        # Now recreate the filesystem by receiving the send stream and verify
        # we get the same file contents that the original filesystem had.
        _scratch_unmount
        _scratch_mkfs >>$seqres.full 2>&1
        _scratch_mount
      
        _run_btrfs_util_prog receive $SCRATCH_MNT -f $send_files_dir/1.snap
      
        # We expect the destination filesystem to have exactly the same file
        # data as the original filesystem.
        # The btrfs send implementation had a bug where it sent a clone
        # operation from file foo into file bar covering the whole [0, 100K[
        # range after creating and writing the file foo. This was incorrect
        # because the file bar now included the updates done to file foo after
        # we cloned foo to bar, breaking the COW nature of reflink copies
        # (cloned extents).
        echo "File digests in the new filesystem:"
        md5sum $SCRATCH_MNT/snap/foo | _filter_scratch
        md5sum $SCRATCH_MNT/snap/bar | _filter_scratch
      
        status=0
        exit
      
      Another test case that reproduces the problem when we have compressed
      extents:
      
        seq=`basename $0`
        seqres=$RESULT_DIR/$seq
        echo "QA output created by $seq"
      
        tmp=/tmp/$$
        status=1	# failure is the default!
        trap "_cleanup; exit \$status" 0 1 2 3 15
      
        _cleanup()
        {
            rm -fr $send_files_dir
            rm -f $tmp.*
        }
      
        # get standard environment, filters and checks
        . ./common/rc
        . ./common/filter
      
        # real QA test starts here
        _supported_fs btrfs
        _supported_os Linux
        _require_scratch
        _need_to_be_root
        _require_cp_reflink
      
        send_files_dir=$TEST_DIR/btrfs-test-$seq
      
        rm -f $seqres.full
        rm -fr $send_files_dir
        mkdir $send_files_dir
      
        _scratch_mkfs >>$seqres.full 2>&1
        _scratch_mount "-o compress"
      
        # Create our file with an extent of 100K starting at file offset 0K.
        $XFS_IO_PROG -f -c "pwrite -S 0xaa 0K 100K"       \
                        -c "fsync"                        \
                        $SCRATCH_MNT/foo | _filter_xfs_io
      
        # Rewrite part of the previous extent (its first 40K) and write a new
        # 100K extent starting at file offset 100K.
        $XFS_IO_PROG -c "pwrite -S 0xbb 0K 40K"    \
                -c "pwrite -S 0xcc 100K 100K"      \
                $SCRATCH_MNT/foo | _filter_xfs_io
      
        # Our file foo now has 3 file extent items in its metadata:
        #
        # 1) One covering the file range 0 to 40K;
        # 2) One covering the file range 40K to 100K, which points to the first
        #    extent we wrote to the file and has a data offset field with value
        #    40K (our file no longer uses the first 40K of data from that
        #    extent);
        # 3) One covering the file range 100K to 200K.
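
        # (Hint, not part of the test: these file extent items and their data
        #  offset fields can be dumped from the fs tree with
        #  "btrfs inspect-internal dump-tree", or btrfs-debug-tree on older
        #  btrfs-progs, run against the scratch device.)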
      
        # Now clone our file foo into file bar.
        cp --reflink=always $SCRATCH_MNT/foo $SCRATCH_MNT/bar
      
        # Create our snapshot for the send operation.
        _run_btrfs_util_prog subvolume snapshot -r $SCRATCH_MNT \
                $SCRATCH_MNT/snap
      
        echo "File digests in the original filesystem:"
        md5sum $SCRATCH_MNT/snap/foo | _filter_scratch
        md5sum $SCRATCH_MNT/snap/bar | _filter_scratch
      
        _run_btrfs_util_prog send $SCRATCH_MNT/snap -f $send_files_dir/1.snap
      
        # Now recreate the filesystem by receiving the send stream and verify we
        # get the same file contents that the original filesystem had.
        # Btrfs send used to issue a clone operation from foo's range
        # [80K, 140K[ to bar's range [40K, 100K[ when cloning the extent pointed
        # to by foo's second file extent item, this was incorrect because of bad
        # accounting of the file extent item's data offset field. The correct
        # range to clone from should have been [40K, 100K[.
        _scratch_unmount
        _scratch_mkfs >>$seqres.full 2>&1
        _scratch_mount "-o compress"
      
        _run_btrfs_util_prog receive $SCRATCH_MNT -f $send_files_dir/1.snap
      
        echo "File digests in the new filesystem:"
        # Must match the digests we got in the original filesystem.
        md5sum $SCRATCH_MNT/snap/foo | _filter_scratch
        md5sum $SCRATCH_MNT/snap/bar | _filter_scratch
      
        status=0
        exit
      Signed-off-by: Filipe Manana <fdmanana@suse.com>