Commit eefaf0a1 authored by David Sterba

btrfs: fix typos found by codespell

Signed-off-by: David Sterba <dsterba@suse.com>
parent 4618d0a6
@@ -626,7 +626,7 @@ static bool should_async_write(struct btrfs_bio *bbio)
 /*
  * Submit bio to an async queue.
  *
- * Return true if the work has been succesfuly submitted, else false.
+ * Return true if the work has been successfully submitted, else false.
  */
 static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
 				struct btrfs_io_context *bioc,
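The bool return value is what lets a caller fall back to synchronous submission when the async queue refuses the work. A minimal caller-side sketch, assuming btrfs_submit_bio()-style surroundings (the caller shown here is not part of this hunk):

	/* Try the async queue first; fall back to direct submission. */
	if (should_async_write(bbio) &&
	    btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
		return;	/* queued, a worker thread will submit the bio */

	/* Not async, or queueing failed: submit synchronously here. */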
@@ -2882,7 +2882,7 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
 		goto unlock_out;
 	/*
-	 * Skip chunk alloction if the bg is SYSTEM, this is to avoid system
+	 * Skip chunk allocation if the bg is SYSTEM, this is to avoid system
	 * chunk allocation storm to exhaust the system chunk array. Otherwise
	 * we still want to try our best to mark the block group read-only.
	 */
@@ -4041,7 +4041,7 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
 	if (check_eb_range(eb, start, len)) {
 		/*
 		 * Invalid range hit, reset the memory, so callers won't get
-		 * some random garbage for their uninitialzed memory.
+		 * some random garbage for their uninitialized memory.
 		 */
 		memset(dstv, 0, len);
 		return;
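Caller-side view of why the memset matters, as a hypothetical example (the buffer and range are illustrative, not from this commit):

	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * If (start, len) is out of range for @eb, @uuid still comes back
	 * fully zeroed instead of holding uninitialized stack contents.
	 */
	read_extent_buffer(eb, uuid, start, sizeof(uuid));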
@@ -9,7 +9,7 @@
  *
  * @cache:      The cache.
  * @max_size:   Maximum size (number of entries) for the cache.
- *              Use 0 for unlimited size, it's the user's responsability to
+ *              Use 0 for unlimited size, it's the user's responsibility to
  *              trim the cache in that case.
  */
 void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
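A short usage sketch of the documented contract (the variable names are hypothetical):

	struct btrfs_lru_cache bounded, unbounded;

	/* At most 64 entries; least-recently-used entries are evicted. */
	btrfs_lru_cache_init(&bounded, 64);

	/* max_size == 0: no limit, so the caller must trim the cache. */
	btrfs_lru_cache_init(&unbounded, 0);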
@@ -194,7 +194,7 @@ static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
  *
  * Must be called with qgroup_lock held and @prealloc preallocated.
  *
- * The control on the lifespan of @prealloc would be transfered to this
+ * The control on the lifespan of @prealloc would be transferred to this
  * function, thus caller should no longer touch @prealloc.
  */
 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
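The comment describes the usual preallocate-then-insert-under-lock hand-off; a hedged caller sketch (error paths trimmed, names assumed from the hunk):

	struct btrfs_qgroup *prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);

	if (!prealloc)
		return -ENOMEM;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
	/* Ownership moved into add_qgroup_rb(); never free or reuse @prealloc. */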
@@ -43,7 +43,7 @@ struct scrub_ctx;
 /*
  * The following value only influences the performance.
  *
- * This detemines how many stripes would be submitted in one go,
+ * This determines how many stripes would be submitted in one go,
  * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
  */
 #define SCRUB_STRIPES_PER_GROUP		8
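(For the arithmetic: BTRFS_STRIPE_LEN is 64KiB, so one group is 8 × 64KiB = 512KiB.)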
@@ -709,7 +709,7 @@ static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
 	/* Metadata, verify the full tree block. */
 	if (sector->is_metadata) {
 		/*
-		 * Check if the tree block crosses the stripe boudary. If
+		 * Check if the tree block crosses the stripe boundary. If
 		 * crossed the boundary, we cannot verify it but only give a
 		 * warning.
 		 *
@@ -883,7 +883,7 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
 	/*
 	 * Init needed infos for error reporting.
 	 *
-	 * Although our scrub_stripe infrastucture is mostly based on btrfs_submit_bio()
+	 * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio()
 	 * thus no need for dev/physical, error reporting still needs dev and physical.
 	 */
 	if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
@@ -1812,7 +1812,7 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
 	if (sctx->is_dev_replace) {
 		/*
 		 * For dev-replace, if we know there is something wrong with
-		 * metadata, we should immedately abort.
+		 * metadata, we should immediately abort.
 		 */
 		for (int i = 0; i < nr_stripes; i++) {
 			if (stripe_has_metadata_error(&sctx->stripes[i])) {
@@ -22,7 +22,7 @@ struct btrfs_tree_parent_check {
 	/*
 	 * Expected transid, can be 0 to skip the check, but such skip
-	 * should only be utlized for backref walk related code.
+	 * should only be utilized for backref walk related code.
 	 */
 	u64 transid;
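A sketch of the skip-transid usage the comment allows; the fields other than transid are assumed for illustration:

	struct btrfs_tree_parent_check check = {
		.owner_root = root->root_key.objectid,
		.transid = 0,	/* 0 skips the transid check (backref walk) */
		.level = level,
	};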
@@ -613,7 +613,7 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
 }
 /*
- * Do the type safe converstion from stripe_nr to offset inside the chunk.
+ * Do the type safe conversion from stripe_nr to offset inside the chunk.
  *
  * @stripe_nr is u32, with left shift it can overflow u32 for chunks larger
  * than 4G. This does the proper type cast to avoid overflow.
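The helper this comment documents amounts to a single widening cast; a sketch assuming the in-tree btrfs_stripe_nr_to_offset() shape (BTRFS_STRIPE_LEN_SHIFT is 16 for 64KiB stripes):

static inline u64 btrfs_stripe_nr_to_offset(u32 stripe_nr)
{
	/* Widen first: a plain u32 shifted by 16 wraps for chunks above 4G. */
	return (u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT;
}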
@@ -319,7 +319,7 @@ static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_i
 			(bdev_zone_sectors(bdev) << SECTOR_SHIFT);
 	}
-	/* Do not allow Host Manged zoned device */
+	/* Do not allow Host Managed zoned device. */
 	return bdev_zoned_model(bdev) != BLK_ZONED_HM;
 }
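(Background: host-managed (BLK_ZONED_HM) devices only accept sequential writes within zones, so a non-zoned btrfs cannot use them, while host-aware devices can still be driven as regular block devices.)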