Commit 7ac687d9 authored by Cong Wang, committed by Cong Wang

btrfs: remove the second argument of k[un]map_atomic()
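
The kmap_atomic() family no longer takes a KM_type slot argument: since the switch to stack-based atomic kmaps, the mapping slot is pushed and popped internally, so callers pass only the page (and, to kunmap_atomic(), the returned address). Convert all btrfs call sites from the two-argument form to the one-argument form; the change is purely mechanical, and the common pattern is sketched after the diff.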

Signed-off-by: Cong Wang <amwang@redhat.com>
parent e8e3c3d6
@@ -120,10 +120,10 @@ static int check_compressed_csum(struct inode *inode,
 		page = cb->compressed_pages[i];
 		csum = ~(u32)0;
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
 		btrfs_csum_final(csum, (char *)&csum);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		if (csum != *cb_sum) {
 			printk(KERN_INFO "btrfs csum failed ino %llu "
@@ -521,10 +521,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		if (zero_offset) {
 			int zeros;
 			zeros = PAGE_CACHE_SIZE - zero_offset;
-			userpage = kmap_atomic(page, KM_USER0);
+			userpage = kmap_atomic(page);
 			memset(userpage + zero_offset, 0, zeros);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage, KM_USER0);
+			kunmap_atomic(userpage);
 		}
 	}
@@ -993,9 +993,9 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
 			    PAGE_CACHE_SIZE - buf_offset);
 		bytes = min(bytes, working_bytes);
-		kaddr = kmap_atomic(page_out, KM_USER0);
+		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		flush_dcache_page(page_out);
 		*pg_offset += bytes;
...
@@ -2546,10 +2546,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		if (zero_offset) {
 			iosize = PAGE_CACHE_SIZE - zero_offset;
-			userpage = kmap_atomic(page, KM_USER0);
+			userpage = kmap_atomic(page);
 			memset(userpage + zero_offset, 0, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage, KM_USER0);
+			kunmap_atomic(userpage);
 		}
 	}
 	while (cur <= end) {
@@ -2558,10 +2558,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			struct extent_state *cached = NULL;
 			iosize = PAGE_CACHE_SIZE - pg_offset;
-			userpage = kmap_atomic(page, KM_USER0);
+			userpage = kmap_atomic(page);
 			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage, KM_USER0);
+			kunmap_atomic(userpage);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur, cur + iosize - 1,
@@ -2607,10 +2607,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			char *userpage;
 			struct extent_state *cached = NULL;
-			userpage = kmap_atomic(page, KM_USER0);
+			userpage = kmap_atomic(page);
 			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage, KM_USER0);
+			kunmap_atomic(userpage);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
@@ -2756,10 +2756,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	if (page->index == end_index) {
 		char *userpage;
-		userpage = kmap_atomic(page, KM_USER0);
+		userpage = kmap_atomic(page);
 		memset(userpage + pg_offset, 0,
 		       PAGE_CACHE_SIZE - pg_offset);
-		kunmap_atomic(userpage, KM_USER0);
+		kunmap_atomic(userpage);
 		flush_dcache_page(page);
 	}
 	pg_offset = 0;
...
@@ -447,13 +447,13 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 			sums->bytenr = ordered->start;
 		}
-		data = kmap_atomic(bvec->bv_page, KM_USER0);
+		data = kmap_atomic(bvec->bv_page);
 		sector_sum->sum = ~(u32)0;
 		sector_sum->sum = btrfs_csum_data(root,
 						  data + bvec->bv_offset,
 						  sector_sum->sum,
 						  bvec->bv_len);
-		kunmap_atomic(data, KM_USER0);
+		kunmap_atomic(data);
 		btrfs_csum_final(sector_sum->sum,
 				 (char *)&sector_sum->sum);
 		sector_sum->bytenr = disk_bytenr;
...
@@ -173,9 +173,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 			cur_size = min_t(unsigned long, compressed_size,
 				       PAGE_CACHE_SIZE);
-			kaddr = kmap_atomic(cpage, KM_USER0);
+			kaddr = kmap_atomic(cpage);
 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			i++;
 			ptr += cur_size;
@@ -187,10 +187,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 		page = find_get_page(inode->i_mapping,
 				     start >> PAGE_CACHE_SHIFT);
 		btrfs_set_file_extent_compression(leaf, ei, 0);
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		offset = start & (PAGE_CACHE_SIZE - 1);
 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		page_cache_release(page);
 	}
 	btrfs_mark_buffer_dirty(leaf);
@@ -422,10 +422,10 @@ static noinline int compress_file_range(struct inode *inode,
 			 * sending it down to disk
 			 */
 			if (offset) {
-				kaddr = kmap_atomic(page, KM_USER0);
+				kaddr = kmap_atomic(page);
 				memset(kaddr + offset, 0,
 				       PAGE_CACHE_SIZE - offset);
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 			}
 			will_compress = 1;
 		}
@@ -1873,7 +1873,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	} else {
 		ret = get_state_private(io_tree, start, &private);
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (ret)
 		goto zeroit;
@@ -1882,7 +1882,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	if (csum != private)
 		goto zeroit;
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 good:
 	return 0;
@@ -1894,7 +1894,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	       (unsigned long long)private);
 	memset(kaddr + offset, 1, end - start + 1);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	if (private == 0)
 		return 0;
 	return -EIO;
@@ -4937,12 +4937,12 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 	ret = btrfs_decompress(compress_type, tmp, page,
 			       extent_offset, inline_size, max_size);
 	if (ret) {
-		char *kaddr = kmap_atomic(page, KM_USER0);
+		char *kaddr = kmap_atomic(page);
 		unsigned long copy_size = min_t(u64,
 			  PAGE_CACHE_SIZE - pg_offset,
 			  max_size - extent_offset);
 		memset(kaddr + pg_offset, 0, copy_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 	kfree(tmp);
 	return 0;
@@ -5719,11 +5719,11 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 			unsigned long flags;
 			local_irq_save(flags);
-			kaddr = kmap_atomic(page, KM_IRQ0);
+			kaddr = kmap_atomic(page);
 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
 					       csum, bvec->bv_len);
 			btrfs_csum_final(csum, (char *)&csum);
-			kunmap_atomic(kaddr, KM_IRQ0);
+			kunmap_atomic(kaddr);
 			local_irq_restore(flags);
 			flush_dcache_page(bvec->bv_page);
...
@@ -411,9 +411,9 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 	bytes = min_t(unsigned long, destlen, out_len - start_byte);
-	kaddr = kmap_atomic(dest_page, KM_USER0);
+	kaddr = kmap_atomic(dest_page);
 	memcpy(kaddr, workspace->buf + start_byte, bytes);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 out:
 	return ret;
 }
...
@@ -591,7 +591,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
 	u64 flags = sbio->spag[ix].flags;
 	page = sbio->bio->bi_io_vec[ix].bv_page;
-	buffer = kmap_atomic(page, KM_USER0);
+	buffer = kmap_atomic(page);
 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
 		ret = scrub_checksum_data(sbio->sdev,
 					  sbio->spag + ix, buffer);
@@ -603,7 +603,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
 	} else {
 		WARN_ON(1);
 	}
-	kunmap_atomic(buffer, KM_USER0);
+	kunmap_atomic(buffer);
 	return ret;
 }
@@ -792,7 +792,7 @@ static void scrub_checksum(struct btrfs_work *work)
 	}
 	for (i = 0; i < sbio->count; ++i) {
 		page = sbio->bio->bi_io_vec[i].bv_page;
-		buffer = kmap_atomic(page, KM_USER0);
+		buffer = kmap_atomic(page);
 		flags = sbio->spag[i].flags;
 		logical = sbio->logical + i * PAGE_SIZE;
 		ret = 0;
@@ -807,7 +807,7 @@ static void scrub_checksum(struct btrfs_work *work)
 		} else {
 			WARN_ON(1);
 		}
-		kunmap_atomic(buffer, KM_USER0);
+		kunmap_atomic(buffer);
 		if (ret) {
 			ret = scrub_recheck_error(sbio, i);
 			if (!ret) {
...
@@ -370,9 +370,9 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		       PAGE_CACHE_SIZE - buf_offset);
 	bytes = min(bytes, bytes_left);
-	kaddr = kmap_atomic(dest_page, KM_USER0);
+	kaddr = kmap_atomic(dest_page);
 	memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	pg_offset += bytes;
 	bytes_left -= bytes;
...
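
For reference, every hunk above applies the same mechanical rewrite. A minimal sketch of the pattern (illustrative only; page, offset and len are placeholder names, not taken from any one call site):

	/* Before: the caller named a fixed per-CPU mapping slot
	 * (KM_USER0 in process context, KM_IRQ0 in interrupt context). */
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr + offset, 0, len);
	kunmap_atomic(kaddr, KM_USER0);

	/* After: slots are managed internally on a per-CPU stack, so only
	 * the page is passed in, and only the returned address is handed
	 * back to kunmap_atomic(). */
	kaddr = kmap_atomic(page);
	memset(kaddr + offset, 0, len);
	kunmap_atomic(kaddr);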