Commit bb7ab3b9 authored by Adam Buchbinder, committed by David Sterba

btrfs: Fix misspellings in comments.

Signed-off-by: Adam Buchbinder <adam.buchbinder@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2e3fcb1c
@@ -178,7 +178,7 @@ struct btrfsic_block {
  * Elements of this type are allocated dynamically and required because
  * each block object can refer to and can be ref from multiple blocks.
  * The key to lookup them in the hashtable is the dev_bytenr of
- * the block ref to plus the one from the block refered from.
+ * the block ref to plus the one from the block referred from.
  * The fact that they are searchable via a hashtable and that a
  * ref_cnt is maintained is not required for the btrfs integrity
  * check algorithm itself, it is only used to make the output more
......
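The comment in the hunk above describes a hashtable whose lookup key combines two device byte numbers: the dev_bytenr of the block the reference is stored in plus that of the block it refers to. A minimal userspace sketch of that keying scheme, not the actual check-integrity code; the names block_ref_key and hash_ref and the hash constant are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define HASHTABLE_SIZE 1024 /* hypothetical bucket count, power of two */

/* Key built from both ends of a block reference. */
struct block_ref_key {
	uint64_t dev_bytenr_from; /* block the ref is stored in */
	uint64_t dev_bytenr_to;   /* block the ref points to */
};

/* Fold both byte numbers into a single bucket index. */
static unsigned int hash_ref(const struct block_ref_key *key)
{
	uint64_t h = key->dev_bytenr_from ^
		     (key->dev_bytenr_to * 0x9e3779b97f4a7c15ULL);
	return (unsigned int)(h & (HASHTABLE_SIZE - 1));
}

int main(void)
{
	struct block_ref_key key = { 4096, 1048576 };
	printf("bucket %u\n", hash_ref(&key));
	return 0;
}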
@@ -788,7 +788,7 @@ struct btrfs_root_item {
 /*
  * This generation number is used to test if the new fields are valid
- * and up to date while reading the root item. Everytime the root item
+ * and up to date while reading the root item. Every time the root item
  * is written out, the "generation" field is copied into this field. If
  * anyone ever mounted the fs with an older kernel, we will have
  * mismatching generation values here and thus must invalidate the
@@ -1219,10 +1219,10 @@ struct btrfs_space_info {
  * we've called update_block_group and dropped the bytes_used counter
  * and increased the bytes_pinned counter. However this means that
  * bytes_pinned does not reflect the bytes that will be pinned once the
- * delayed refs are flushed, so this counter is inc'ed everytime we call
- * btrfs_free_extent so it is a realtime count of what will be freed
- * once the transaction is committed. It will be zero'ed everytime the
- * transaction commits.
+ * delayed refs are flushed, so this counter is inc'ed every time we
+ * call btrfs_free_extent so it is a realtime count of what will be
+ * freed once the transaction is committed. It will be zero'ed every
+ * time the transaction commits.
  */
 struct percpu_counter total_bytes_pinned;
......
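The root-item comment in the first hunk above describes a simple validity protocol: every write copies the "generation" field into a second field, so if an older kernel (which never updates the copy) mounted the filesystem, the two values disagree and the newer fields must be invalidated. A standalone sketch of that check, with the struct and function names invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct root_item_sketch {
	uint64_t generation;    /* bumped on every write of the root item */
	uint64_t generation_v2; /* copied from generation whenever the new fields are written */
	/* ... newer fields guarded by the check ... */
};

/* The new fields are trustworthy only if both generations still agree. */
static bool new_fields_valid(const struct root_item_sketch *ri)
{
	return ri->generation == ri->generation_v2;
}

int main(void)
{
	/* An older kernel bumped generation without touching generation_v2,
	 * so the newer fields must be treated as invalid. */
	struct root_item_sketch ri = { .generation = 8, .generation_v2 = 7 };
	printf("new fields valid: %d\n", new_fields_valid(&ri));
	return 0;
}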
@@ -858,7 +858,7 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
  * not called and the the filesystem is remounted
  * in degraded state. This does not stop the
  * dev_replace procedure. It needs to be canceled
- * manually if the cancelation is wanted.
+ * manually if the cancellation is wanted.
  */
 break;
 }
......
@@ -816,7 +816,7 @@ static void run_one_async_done(struct btrfs_work *work)
 waitqueue_active(&fs_info->async_submit_wait))
 wake_up(&fs_info->async_submit_wait);
- /* If an error occured we just want to clean up the bio and move on */
+ /* If an error occurred we just want to clean up the bio and move on */
 if (async->error) {
 async->bio->bi_error = async->error;
 bio_endio(async->bio);
......
@@ -5758,7 +5758,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 /*
  * This is tricky, but first we need to figure out how much we
- * free'd from any free-ers that occured during this
+ * free'd from any free-ers that occurred during this
  * reservation, so we reset ->csum_bytes to the csum_bytes
  * before we dropped our lock, and then call the free for the
  * number of bytes that were freed while we were trying our
......
@@ -62,7 +62,7 @@ struct extent_map *alloc_extent_map(void)
 /**
  * free_extent_map - drop reference count of an extent_map
- * @em: extent map beeing releasead
+ * @em: extent map being releasead
  *
  * Drops the reference out on @em by one and free the structure
  * if the reference count hits zero.
@@ -422,7 +422,7 @@ struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
 /**
  * remove_extent_mapping - removes an extent_map from the extent tree
  * @tree: extent tree to remove from
- * @em: extent map beeing removed
+ * @em: extent map being removed
  *
  * Removes @em from @tree. No reference counts are dropped, and no checks
  * are done to see if the range is in use
......
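The free_extent_map() kerneldoc in the hunks above documents the usual drop-and-free reference contract: decrement the count, free the structure when it reaches zero. A minimal userspace sketch of that contract using C11 atomics; the em_sketch type and em_put name are stand-ins, not the btrfs API:

#include <stdatomic.h>
#include <stdlib.h>

/* Toy stand-in for struct extent_map; field names are illustrative. */
struct em_sketch {
	atomic_int refs;
	/* ... mapping data ... */
};

/* Drop one reference; free the structure when the count hits zero,
 * mirroring the contract documented above. */
static void em_put(struct em_sketch *em)
{
	if (!em)
		return;
	/* atomic_fetch_sub returns the previous value, so 1 means
	 * this caller dropped the last reference. */
	if (atomic_fetch_sub(&em->refs, 1) == 1)
		free(em);
}

int main(void)
{
	struct em_sketch *em = malloc(sizeof(*em));
	if (!em)
		return 1;
	atomic_init(&em->refs, 2);
	em_put(em); /* refs: 2 -> 1, still alive */
	em_put(em); /* refs: 1 -> 0, freed here */
	return 0;
}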
@@ -1847,7 +1847,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 /*
  * We also have to set last_sub_trans to the current log transid,
  * otherwise subsequent syncs to a file that's been synced in this
- * transaction will appear to have already occured.
+ * transaction will appear to have already occurred.
  */
 spin_lock(&BTRFS_I(inode)->lock);
 BTRFS_I(inode)->last_sub_trans = root->log_transid;
......
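The comment in the hunk above explains why the write path records the current log transaction id: a later fsync compares ids to decide whether the file's latest changes already made it into the log. A deliberately simplified model of that bookkeeping; the field last_sub_trans is borrowed from the diff, everything else (the struct, logged_trans, the helper names) is invented for the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct inode_sketch {
	uint64_t last_sub_trans; /* log transid at the last write */
	uint64_t logged_trans;   /* log transid last synced to the log */
};

/* After a write, remember which log transaction the change belongs to. */
static void record_write(struct inode_sketch *in, uint64_t log_transid)
{
	in->last_sub_trans = log_transid;
}

/* fsync can skip work only when the latest write was already logged. */
static bool needs_log_sync(const struct inode_sketch *in)
{
	return in->last_sub_trans > in->logged_trans;
}

int main(void)
{
	struct inode_sketch in = { 0, 0 };
	record_write(&in, 42);
	printf("needs sync: %d\n", needs_log_sync(&in)); /* 1 */
	in.logged_trans = 42;
	printf("needs sync: %d\n", needs_log_sync(&in)); /* 0 */
	return 0;
}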
@@ -1010,7 +1010,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 for (; node; node = rb_prev(node)) {
 test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- /* We treat this entry as if it doesnt exist */
+ /* We treat this entry as if it doesn't exist */
 if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
 continue;
 if (test->file_offset + test->len <= disk_i_size)
......
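The loop in the hunk above walks the ordered-extent tree backwards with rb_prev() and skips entries whose BTRFS_ORDERED_UPDATED_ISIZE bit is set, treating them as if they do not exist. A toy illustration of the same skip-flagged-entries pattern over a plain linked list; the list layout and flag name here are made up for the sketch:

#include <stdbool.h>
#include <stdio.h>

struct entry {
	struct entry *prev;
	bool updated_isize; /* stands in for BTRFS_ORDERED_UPDATED_ISIZE */
	unsigned long file_offset;
};

/* Walk backwards, treating flagged entries as if they don't exist. */
static struct entry *prev_live_entry(struct entry *e)
{
	for (; e; e = e->prev)
		if (!e->updated_isize)
			return e;
	return NULL;
}

int main(void)
{
	struct entry a = { NULL, false, 1024 };
	struct entry b = { &a, true, 4096 }; /* flagged, skipped */
	struct entry c = { &b, false, 8192 };
	struct entry *p = prev_live_entry(c.prev);
	printf("previous live offset: %lu\n", p ? p->file_offset : 0); /* 1024 */
	return 0;
}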
@@ -1046,7 +1046,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
 /*
  * NOTE: we have searched root tree and checked the
- * coresponding ref, it does not need to check again.
+ * corresponding ref, it does not need to check again.
  */
 *search_done = 1;
 }
......
@@ -2749,7 +2749,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 em->start + em->len < chunk_offset) {
 /*
  * This is a logic error, but we don't want to just rely on the
- * user having built with ASSERT enabled, so if ASSERT doens't
+ * user having built with ASSERT enabled, so if ASSERT doesn't
  * do anything we still error out.
  */
 ASSERT(0);
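The comment in the hunk above captures a defensive pattern: ASSERT() may compile to nothing, so the code both asserts and returns an error rather than running on in a corrupt state. A small sketch of the same pattern using the standard C assert(); the function name is invented:

#include <assert.h>
#include <errno.h>
#include <stdio.h>

static int remove_chunk_sketch(int em_found)
{
	if (!em_found) {
		assert(0);      /* loud in debug builds */
		return -EINVAL; /* still refuse to continue when NDEBUG elides assert() */
	}
	return 0;
}

int main(void)
{
	printf("ret = %d\n", remove_chunk_sketch(1)); /* 0: mapping found */
	return 0;
}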
@@ -4119,7 +4119,7 @@ static int btrfs_uuid_scan_kthread(void *data)
  * Callback for btrfs_uuid_tree_iterate().
  * returns:
  * 0 check succeeded, the entry is not outdated.
- * < 0 if an error occured.
+ * < 0 if an error occurred.
  * > 0 if the check failed, which means the caller shall remove the entry.
  */
 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
......
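The comment block in the last hunk documents a common kernel callback convention: 0 means keep the entry, a negative value aborts the iteration with an error, and a positive value tells the iterator to drop the entry. A minimal sketch of an iterator honoring that contract; all names here are invented for illustration, not the btrfs_uuid_tree_iterate() API:

#include <errno.h>
#include <stdio.h>

/* Return convention, mirroring the comment above:
 * 0 keep the entry, < 0 abort with an error, > 0 remove the entry. */
typedef int (*check_fn)(long entry);

static int iterate_entries(long *entries, int n, check_fn check)
{
	for (int i = 0; i < n; i++) {
		int ret = check(entries[i]);
		if (ret < 0)
			return ret;     /* propagate the error */
		if (ret > 0)
			entries[i] = 0; /* "removed" marker for the sketch */
	}
	return 0;
}

/* Example check: zero is malformed, negative entries are outdated. */
static int check_entry(long entry)
{
	if (entry == 0)
		return -EINVAL;
	return entry < 0 ? 1 : 0;
}

int main(void)
{
	long entries[] = { 3, -7, 5 };
	int ret = iterate_entries(entries, 3, check_entry);
	printf("ret=%d entries={%ld,%ld,%ld}\n",
	       ret, entries[0], entries[1], entries[2]); /* ret=0 {3,0,5} */
	return 0;
}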