Commit ed6078f7 authored by David Sterba, committed by Chris Mason

btrfs: use DIV_ROUND_UP instead of open-coded variants

The form

  (value + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT

is equivalent to

  (value + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE

The rest is a simple substitution; there is no difference in the generated
assembly code.
Signed-off-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Chris Mason <clm@fb.com>
parent 4e54b17a
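
For reference, DIV_ROUND_UP() is defined in include/linux/kernel.h as (((n) + (d) - 1) / (d)), and PAGE_CACHE_SIZE is 1 << PAGE_CACHE_SHIFT, so the open-coded shift and the macro compute the same rounded-up page count. Below is a minimal userspace sketch of that equivalence; the 4 KiB page size and the local PAGE_SHIFT/PAGE_SIZE stand-ins are assumptions for illustration only, not part of the patch.

/*
 * Standalone sketch: the open-coded round-up shift and DIV_ROUND_UP()
 * agree for every length.  PAGE_SHIFT = 12 (4 KiB pages) is assumed
 * here purely for illustration.
 */
#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* same definition as DIV_ROUND_UP() in include/linux/kernel.h */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        size_t len;

        for (len = 0; len <= 3 * PAGE_SIZE; len++) {
                size_t pages_shift = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; /* old form */
                size_t pages_div = DIV_ROUND_UP(len, PAGE_SIZE);          /* new form */

                assert(pages_shift == pages_div);
        }
        return 0;
}

Exhaustively checking lengths up to three pages covers the boundary cases (zero, exact multiples, and one byte past a page boundary), which is where an off-by-one in either form would show up.
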
@@ -1251,8 +1251,7 @@ static void btrfsic_read_from_block_data(
         while (len > 0) {
                 cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
-                BUG_ON(i >= (block_ctx->len + PAGE_CACHE_SIZE - 1) >>
-                            PAGE_CACHE_SHIFT);
+                BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE));
                 kaddr = block_ctx->datav[i];
                 memcpy(dst, kaddr + offset_in_page, cur);
...
@@ -91,8 +91,7 @@ static inline int compressed_bio_size(struct btrfs_root *root,
         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
         return sizeof(struct compressed_bio) +
-                ((disk_size + root->sectorsize - 1) / root->sectorsize) *
-                csum_size;
+                (DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
 }
 static struct bio *compressed_bio_alloc(struct block_device *bdev,
@@ -615,8 +614,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         cb->compress_type = extent_compress_type(bio_flags);
         cb->orig_bio = bio;
-        nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
-                   PAGE_CACHE_SIZE;
+        nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
         cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
                                        GFP_NOFS);
         if (!cb->compressed_pages)
...@@ -686,8 +684,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ...@@ -686,8 +684,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio, sums); comp_bio, sums);
BUG_ON(ret); /* -ENOMEM */ BUG_ON(ret); /* -ENOMEM */
} }
sums += (comp_bio->bi_iter.bi_size + sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
root->sectorsize - 1) / root->sectorsize; root->sectorsize);
ret = btrfs_map_bio(root, READ, comp_bio, ret = btrfs_map_bio(root, READ, comp_bio,
mirror_num, 0); mirror_num, 0);
......
@@ -1481,9 +1481,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
         bool force_page_uptodate = false;
         bool need_unlock;
-        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
-                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
-                     (sizeof(struct page *)));
+        nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
+                     PAGE_CACHE_SIZE / (sizeof(struct page *)));
         nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
         nrptrs = max(nrptrs, 8);
         pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
@@ -1497,8 +1496,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                 size_t write_bytes = min(iov_iter_count(i),
                                          nrptrs * (size_t)PAGE_CACHE_SIZE -
                                          offset);
-                size_t num_pages = (write_bytes + offset +
-                                    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+                size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
+                                                PAGE_CACHE_SIZE);
                 size_t reserve_bytes;
                 size_t dirty_pages;
                 size_t copied;
@@ -1526,9 +1525,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                  * our prealloc extent may be smaller than
                                  * write_bytes, so scale down.
                                  */
-                                num_pages = (write_bytes + offset +
-                                             PAGE_CACHE_SIZE - 1) >>
-                                             PAGE_CACHE_SHIFT;
+                                num_pages = DIV_ROUND_UP(write_bytes + offset,
+                                                         PAGE_CACHE_SIZE);
                                 reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
                                 ret = 0;
                         } else {
@@ -1590,9 +1588,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                         dirty_pages = 0;
                 } else {
                         force_page_uptodate = false;
-                        dirty_pages = (copied + offset +
-                                       PAGE_CACHE_SIZE - 1) >>
-                                       PAGE_CACHE_SHIFT;
+                        dirty_pages = DIV_ROUND_UP(copied + offset,
+                                                   PAGE_CACHE_SIZE);
                 }
                 /*
...
@@ -279,8 +279,7 @@ static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
         int num_pages;
         int check_crcs = 0;
-        num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-                    PAGE_CACHE_SHIFT;
+        num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
         if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
                 check_crcs = 1;
...
@@ -1335,8 +1335,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                 inode->i_mapping->writeback_index = i;
         while (i <= last_index && defrag_count < max_to_defrag &&
-               (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-                PAGE_CACHE_SHIFT)) {
+               (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) {
                 /*
                  * make sure we stop running if someone unmounts
                  * the FS
@@ -1359,7 +1358,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                          * the should_defrag function tells us how much to skip
                          * bump our counter by the suggested amount
                          */
-                        next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+                        next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE);
                         i = max(i + 1, next);
                         continue;
                 }
...
@@ -266,8 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
         char *data_in;
         unsigned long page_in_index = 0;
         unsigned long page_out_index = 0;
-        unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
-                                        PAGE_CACHE_SIZE;
+        unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
         unsigned long buf_start;
         unsigned long buf_offset = 0;
         unsigned long bytes;
...
@@ -912,7 +912,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
 {
         unsigned long nr = stripe_len * nr_stripes;
-        return (nr + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE);
 }
 /*
@@ -1442,7 +1442,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
         struct btrfs_bio *bbio = rbio->bbio;
         struct bio_list bio_list;
         int ret;
-        int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
         int pagenr;
         int stripe;
         struct bio *bio;
@@ -1725,7 +1725,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
         int pagenr, stripe;
         void **pointers;
         int faila = -1, failb = -1;
-        int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
         struct page *page;
         int err;
         int i;
@@ -1940,7 +1940,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
         struct btrfs_bio *bbio = rbio->bbio;
         struct bio_list bio_list;
         int ret;
-        int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
         int pagenr;
         int stripe;
         struct bio *bio;
...
@@ -225,8 +225,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
         size_t total_out = 0;
         unsigned long page_in_index = 0;
         unsigned long page_out_index = 0;
-        unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
-                                PAGE_CACHE_SIZE;
+        unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE);
         unsigned long buf_start;
         unsigned long pg_offset;
...