Commit d0082371 authored by Jeff Mahoney, committed by David Sterba

btrfs: drop gfp_t from lock_extent

 lock_extent and unlock_extent are always called with GFP_NOFS, drop the
 argument and use GFP_NOFS consistently.
Signed-off-by: Jeff Mahoney <jeffm@suse.com>
parent 143bede5
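
For context, the interface change at a typical call site looks like this; a minimal before/after sketch distilled from the hunks below, where tree, start and end stand for whatever extent_io_tree and byte range the caller already holds:

	/* before: every caller passed a gfp_t, and in practice it was always GFP_NOFS */
	lock_extent(tree, start, end, GFP_NOFS);
	/* ... operate on the locked range ... */
	unlock_extent(tree, start, end, GFP_NOFS);

	/* after: the gfp_t argument is dropped; lock_extent/unlock_extent use GFP_NOFS internally */
	lock_extent(tree, start, end);
	/* ... operate on the locked range ... */
	unlock_extent(tree, start, end);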
@@ -496,7 +496,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 * sure they map to this compressed extent on disk.
 		 */
 		set_page_extent_mapped(page);
-		lock_extent(tree, last_offset, end, GFP_NOFS);
+		lock_extent(tree, last_offset, end);
 		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, last_offset,
 					   PAGE_CACHE_SIZE);
@@ -506,7 +506,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
 		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
 			free_extent_map(em);
-			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
 			page_cache_release(page);
 			break;
@@ -534,7 +534,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			nr_pages++;
 			page_cache_release(page);
 		} else {
-			unlock_extent(tree, last_offset, end, GFP_NOFS);
+			unlock_extent(tree, last_offset, end);
 			unlock_page(page);
 			page_cache_release(page);
 			break;
...
@@ -332,7 +332,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 		return 0;
 
 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
-			 0, &cached_state, GFP_NOFS);
+			 0, &cached_state);
 	if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
 	    btrfs_header_generation(eb) == parent_transid) {
 		ret = 0;
...
@@ -1171,42 +1171,40 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, struct extent_state **cached_state, gfp_t mask)
+		     int bits, struct extent_state **cached_state)
 {
 	int err;
 	u64 failed_start;
 	while (1) {
 		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
 				     EXTENT_LOCKED, &failed_start,
-				     cached_state, mask);
-		if (err == -EEXIST && (mask & __GFP_WAIT)) {
+				     cached_state, GFP_NOFS);
+		if (err == -EEXIST) {
 			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 			start = failed_start;
-		} else {
+		} else
 			break;
-		}
 		WARN_ON(start > end);
 	}
 	return err;
 }
 
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
-	return lock_extent_bits(tree, start, end, 0, NULL, mask);
+	return lock_extent_bits(tree, start, end, 0, NULL);
 }
 
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-		    gfp_t mask)
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	int err;
 	u64 failed_start;
 	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
-			     &failed_start, NULL, mask);
+			     &failed_start, NULL, GFP_NOFS);
 	if (err == -EEXIST) {
 		if (failed_start > start)
 			clear_extent_bit(tree, start, failed_start - 1,
-					 EXTENT_LOCKED, 1, 0, NULL, mask);
+					 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
 		return 0;
 	}
 	return 1;
@@ -1219,10 +1217,10 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
 				mask);
 }
 
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
-				mask);
+				GFP_NOFS);
 }
 
 /*
@@ -1518,8 +1516,7 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
 	BUG_ON(ret);
 
 	/* step three, lock the state bits for the whole range */
-	lock_extent_bits(tree, delalloc_start, delalloc_end,
-			 0, &cached_state, GFP_NOFS);
+	lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
 
 	/* then test to make sure it is all still delalloc */
 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -2557,11 +2554,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	end = page_end;
 
 	while (1) {
-		lock_extent(tree, start, end, GFP_NOFS);
+		lock_extent(tree, start, end);
 		ordered = btrfs_lookup_ordered_extent(inode, start);
 		if (!ordered)
 			break;
-		unlock_extent(tree, start, end, GFP_NOFS);
+		unlock_extent(tree, start, end);
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 	}
@@ -2598,7 +2595,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 				       end - cur + 1, 0);
 		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
-			unlock_extent(tree, cur, end, GFP_NOFS);
+			unlock_extent(tree, cur, end);
 			break;
 		}
 		extent_offset = cur - em->start;
@@ -2650,7 +2647,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		if (test_range_bit(tree, cur, cur_end,
 				   EXTENT_UPTODATE, 1, NULL)) {
 			check_page_uptodate(tree, page);
-			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+			unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -2660,7 +2657,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		 */
 		if (block_start == EXTENT_MAP_INLINE) {
 			SetPageError(page);
-			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+			unlock_extent(tree, cur, cur + iosize - 1);
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -3274,7 +3271,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	if (start > end)
 		return 0;
 
-	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
+	lock_extent_bits(tree, start, end, 0, &cached_state);
 	wait_on_page_writeback(page);
 	clear_extent_bit(tree, start, end,
 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -3488,7 +3485,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
-			 &cached_state, GFP_NOFS);
+			 &cached_state);
 	em = get_extent_skip_holes(inode, start, last_for_get_extent,
 				   get_extent);
...
@@ -182,14 +182,13 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
 int try_release_extent_state(struct extent_map_tree *map,
			     struct extent_io_tree *tree, struct page *page,
			     gfp_t mask);
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, struct extent_state **cached, gfp_t mask);
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+		     int bits, struct extent_state **cached);
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask);
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-		    gfp_t mask);
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
			  get_extent_t *get_extent, int mirror_num);
 int __init extent_io_init(void);
...
@@ -1105,8 +1105,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
 		lock_extent_bits(&BTRFS_I(inode)->io_tree,
-				 start_pos, last_pos - 1, 0, &cached_state,
-				 GFP_NOFS);
+				 start_pos, last_pos - 1, 0, &cached_state);
 		ordered = btrfs_lookup_first_ordered_extent(inode,
 							    last_pos - 1);
 		if (ordered &&
@@ -1638,7 +1637,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	 * transaction
 	 */
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
-			 locked_end, 0, &cached_state, GFP_NOFS);
+			 locked_end, 0, &cached_state);
 	ordered = btrfs_lookup_first_ordered_extent(inode,
 						    alloc_end - 1);
 	if (ordered &&
@@ -1737,7 +1736,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
 		return -ENXIO;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
-			 &cached_state, GFP_NOFS);
+			 &cached_state);
 	/*
 	 * Delalloc is such a pain. If we have a hole and we have pending
...
@@ -869,7 +869,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	io_ctl_prepare_pages(&io_ctl, inode, 0);
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
-			 0, &cached_state, GFP_NOFS);
+			 0, &cached_state);
 	node = rb_first(&ctl->free_space_offset);
 	if (!node && cluster) {
...
@@ -597,7 +597,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 			lock_extent(io_tree, async_extent->start,
 				    async_extent->start +
-				    async_extent->ram_size - 1, GFP_NOFS);
+				    async_extent->ram_size - 1);
 
 			/* allocate blocks */
 			ret = cow_file_range(inode, async_cow->locked_page,
@@ -625,8 +625,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 		}
 
 		lock_extent(io_tree, async_extent->start,
-			    async_extent->start + async_extent->ram_size - 1,
-			    GFP_NOFS);
+			    async_extent->start + async_extent->ram_size - 1);
 
 		trans = btrfs_join_transaction(root);
 		BUG_ON(IS_ERR(trans));
@@ -649,7 +648,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 			async_extent->pages = NULL;
 			unlock_extent(io_tree, async_extent->start,
 				      async_extent->start +
-				      async_extent->ram_size - 1, GFP_NOFS);
+				      async_extent->ram_size - 1);
 			goto retry;
 		}
@@ -1574,7 +1573,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
-			 &cached_state, GFP_NOFS);
+			 &cached_state);
 
 	/* already ordered? We're done */
 	if (PagePrivate2(page))
@@ -1765,7 +1764,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	lock_extent_bits(io_tree, ordered_extent->file_offset,
 			 ordered_extent->file_offset + ordered_extent->len - 1,
-			 0, &cached_state, GFP_NOFS);
+			 0, &cached_state);
 
 	if (nolock)
 		trans = btrfs_join_transaction_nolock(root);
@@ -3285,8 +3284,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
-			 GFP_NOFS);
+	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
 	set_page_extent_mapped(page);
 
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -3362,7 +3360,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 		btrfs_wait_ordered_range(inode, hole_start,
 					 block_end - hole_start);
 		lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
-				 &cached_state, GFP_NOFS);
+				 &cached_state);
 		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
 		if (!ordered)
 			break;
@@ -5604,7 +5602,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 		free_extent_map(em);
 
 		/* DIO will do one hole at a time, so just unlock a sector */
 		unlock_extent(&BTRFS_I(inode)->io_tree, start,
-			      start + root->sectorsize - 1, GFP_NOFS);
+			      start + root->sectorsize - 1);
 		return 0;
 	}
@@ -5745,7 +5743,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 	} while (bvec <= bvec_end);
 
 	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
-		      dip->logical_offset + dip->bytes - 1, GFP_NOFS);
+		      dip->logical_offset + dip->bytes - 1);
 	bio->bi_private = dip->private;
 
 	kfree(dip->csums);
@@ -5796,7 +5794,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
 			 ordered->file_offset + ordered->len - 1, 0,
-			 &cached_state, GFP_NOFS);
+			 &cached_state);
 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
 		ret = btrfs_mark_extent_written(trans, inode,
@@ -6211,7 +6209,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 	while (1) {
 		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
-				 0, &cached_state, GFP_NOFS);
+				 0, &cached_state);
 		/*
 		 * We're concerned with the entire range that we're going to be
 		 * doing DIO to, so we need to make sure theres no ordered
@@ -6365,8 +6363,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 		btrfs_releasepage(page, GFP_NOFS);
 		return;
 	}
-	lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
-			 GFP_NOFS);
+	lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
 					      page_offset(page));
 	if (ordered) {
@@ -6388,8 +6385,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 		}
 		btrfs_put_ordered_extent(ordered);
 		cached_state = NULL;
-		lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
-				 GFP_NOFS);
+		lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
 	}
 	clear_extent_bit(tree, page_start, page_end,
 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -6464,8 +6460,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
-			 GFP_NOFS);
+	lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
 	set_page_extent_mapped(page);
 
 	/*
...
@@ -797,9 +797,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
 	if (!em) {
 		/* get the big lock and read metadata off disk */
-		lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+		lock_extent(io_tree, start, start + len - 1);
 		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
-		unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+		unlock_extent(io_tree, start, start + len - 1);
 
 		if (IS_ERR(em))
 			return 0;
@@ -887,10 +887,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
 		page_start = page_offset(page);
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
 		while (1) {
-			lock_extent(tree, page_start, page_end, GFP_NOFS);
+			lock_extent(tree, page_start, page_end);
 			ordered = btrfs_lookup_ordered_extent(inode,
 							      page_start);
-			unlock_extent(tree, page_start, page_end, GFP_NOFS);
+			unlock_extent(tree, page_start, page_end);
 			if (!ordered)
 				break;
@@ -946,8 +946,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
 	page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
 
 	lock_extent_bits(&BTRFS_I(inode)->io_tree,
-			 page_start, page_end - 1, 0, &cached_state,
-			 GFP_NOFS);
+			 page_start, page_end - 1, 0, &cached_state);
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
 			 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
 			 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
@@ -2326,13 +2325,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 	   another, and lock file content */
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
-		lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+		lock_extent(&BTRFS_I(src)->io_tree, off, off+len);
 		ordered = btrfs_lookup_first_ordered_extent(src, off+len);
 		if (!ordered &&
 		    !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
 				    EXTENT_DELALLOC, 0, NULL))
 			break;
-		unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+		unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
 		if (ordered)
 			btrfs_put_ordered_extent(ordered);
 		btrfs_wait_ordered_range(src, off, len);
@@ -2551,7 +2550,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 	ret = 0;
 out:
 	btrfs_release_path(path);
-	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
+	unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
 out_unlock:
 	mutex_unlock(&src->i_mutex);
 	mutex_unlock(&inode->i_mutex);
...
@@ -1604,15 +1604,14 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
 				end--;
 				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
-						      key.offset, end,
-						      GFP_NOFS);
+						      key.offset, end);
 				if (!ret)
 					continue;
 
 				btrfs_drop_extent_cache(inode, key.offset, end,
 							1);
 				unlock_extent(&BTRFS_I(inode)->io_tree,
-					      key.offset, end, GFP_NOFS);
+					      key.offset, end);
 			}
 		}
@@ -1983,9 +1982,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 		}
 
 		/* the lock_extent waits for readpage to complete */
-		lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 		btrfs_drop_extent_cache(inode, start, end, 1);
-		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
 	}
 	return 0;
 }
@@ -2889,12 +2888,12 @@ int prealloc_file_extent_cluster(struct inode *inode,
 		else
 			end = cluster->end - offset;
 
-		lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 		num_bytes = end + 1 - start;
 		ret = btrfs_prealloc_file_range(inode, 0, start,
 						num_bytes, num_bytes,
 						end + 1, &alloc_hint);
-		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
 		if (ret)
 			break;
 		nr++;
@@ -2926,7 +2925,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
 	em->bdev = root->fs_info->fs_devices->latest_bdev;
 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
 
-	lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
 	while (1) {
 		write_lock(&em_tree->lock);
 		ret = add_extent_mapping(em_tree, em);
@@ -2937,7 +2936,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
 		}
 		btrfs_drop_extent_cache(inode, start, end, 0);
 	}
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
+	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
 	return ret;
 }
@@ -3017,8 +3016,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
 		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-		lock_extent(&BTRFS_I(inode)->io_tree,
-			    page_start, page_end, GFP_NOFS);
+		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
 
 		set_page_extent_mapped(page);
@@ -3034,7 +3032,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
 		set_page_dirty(page);
 
 		unlock_extent(&BTRFS_I(inode)->io_tree,
-			      page_start, page_end, GFP_NOFS);
+			      page_start, page_end);
 		unlock_page(page);
 		page_cache_release(page);
...