Commit 7e755aa7 authored by Josef Bacik's avatar Josef Bacik Committed by David Sterba

btrfs: utilize folio more in btrfs_page_mkwrite()

We already have a folio that we're using in btrfs_page_mkwrite, update
the rest of the function to use folio everywhere else.  This will make
it easier on Willy when he drops page->index.
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent c808c1dc
...@@ -1920,8 +1920,8 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) ...@@ -1920,8 +1920,8 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
reserved_space = PAGE_SIZE; reserved_space = PAGE_SIZE;
sb_start_pagefault(inode->i_sb); sb_start_pagefault(inode->i_sb);
page_start = page_offset(page); page_start = folio_pos(folio);
page_end = page_start + PAGE_SIZE - 1; page_end = page_start + folio_size(folio) - 1;
end = page_end; end = page_end;
/* /*
...@@ -1949,18 +1949,18 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) ...@@ -1949,18 +1949,18 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
ret = VM_FAULT_NOPAGE; ret = VM_FAULT_NOPAGE;
again: again:
down_read(&BTRFS_I(inode)->i_mmap_lock); down_read(&BTRFS_I(inode)->i_mmap_lock);
lock_page(page); folio_lock(folio);
size = i_size_read(inode); size = i_size_read(inode);
if ((page->mapping != inode->i_mapping) || if ((folio->mapping != inode->i_mapping) ||
(page_start >= size)) { (page_start >= size)) {
/* Page got truncated out from underneath us. */ /* Page got truncated out from underneath us. */
goto out_unlock; goto out_unlock;
} }
wait_on_page_writeback(page); folio_wait_writeback(folio);
lock_extent(io_tree, page_start, page_end, &cached_state); lock_extent(io_tree, page_start, page_end, &cached_state);
ret2 = set_page_extent_mapped(page); ret2 = set_folio_extent_mapped(folio);
if (ret2 < 0) { if (ret2 < 0) {
ret = vmf_error(ret2); ret = vmf_error(ret2);
unlock_extent(io_tree, page_start, page_end, &cached_state); unlock_extent(io_tree, page_start, page_end, &cached_state);
...@@ -1974,14 +1974,14 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) ...@@ -1974,14 +1974,14 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE); ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
if (ordered) { if (ordered) {
unlock_extent(io_tree, page_start, page_end, &cached_state); unlock_extent(io_tree, page_start, page_end, &cached_state);
unlock_page(page); folio_unlock(folio);
up_read(&BTRFS_I(inode)->i_mmap_lock); up_read(&BTRFS_I(inode)->i_mmap_lock);
btrfs_start_ordered_extent(ordered); btrfs_start_ordered_extent(ordered);
btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered);
goto again; goto again;
} }
if (page->index == ((size - 1) >> PAGE_SHIFT)) { if (folio->index == ((size - 1) >> PAGE_SHIFT)) {
reserved_space = round_up(size - page_start, fs_info->sectorsize); reserved_space = round_up(size - page_start, fs_info->sectorsize);
if (reserved_space < PAGE_SIZE) { if (reserved_space < PAGE_SIZE) {
end = page_start + reserved_space - 1; end = page_start + reserved_space - 1;
...@@ -2011,13 +2011,13 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) ...@@ -2011,13 +2011,13 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
} }
/* Page is wholly or partially inside EOF. */ /* Page is wholly or partially inside EOF. */
if (page_start + PAGE_SIZE > size) if (page_start + folio_size(folio) > size)
zero_start = offset_in_page(size); zero_start = offset_in_folio(folio, size);
else else
zero_start = PAGE_SIZE; zero_start = PAGE_SIZE;
if (zero_start != PAGE_SIZE) if (zero_start != PAGE_SIZE)
memzero_page(page, zero_start, PAGE_SIZE - zero_start); folio_zero_range(folio, zero_start, folio_size(folio) - zero_start);
btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE); btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start); btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
...@@ -2034,7 +2034,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf) ...@@ -2034,7 +2034,7 @@ static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
return VM_FAULT_LOCKED; return VM_FAULT_LOCKED;
out_unlock: out_unlock:
unlock_page(page); folio_unlock(folio);
up_read(&BTRFS_I(inode)->i_mmap_lock); up_read(&BTRFS_I(inode)->i_mmap_lock);
out: out:
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment