Commit c534aa3f authored by Christoph Hellwig, committed by Darrick J. Wong

mm: return an unsigned int from __do_page_cache_readahead

We never return an error, so switch to returning an unsigned int.  Most
callers already did implicit casts to an unsigned type, and the one that
didn't can be simplified now.
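For illustration only (not part of the commit itself), the change to the prototype in mm/internal.h amounts to the following before/after, with the comments added here as a reading aid:

	/* Before: the int return value is never negative, so callers either
	 * cast it implicitly to an unsigned type or carry a dead error check. */
	extern int __do_page_cache_readahead(struct address_space *mapping,
			struct file *filp, pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size);

	/* After: the prototype states that invariant directly. */
	extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
			struct file *filp, pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size);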
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 836978b3
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -53,7 +53,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
 
-extern int __do_page_cache_readahead(struct address_space *mapping,
+extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
 		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
 		unsigned long lookahead_size);
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -147,16 +147,16 @@ static int read_pages(struct address_space *mapping, struct file *filp,
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
  */
-int __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-		pgoff_t offset, unsigned long nr_to_read,
+unsigned int __do_page_cache_readahead(struct address_space *mapping,
+		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
 		unsigned long lookahead_size)
 {
 	struct inode *inode = mapping->host;
 	struct page *page;
 	unsigned long end_index;	/* The last page we want to read */
 	LIST_HEAD(page_pool);
 	int page_idx;
-	int nr_pages = 0;
+	unsigned int nr_pages = 0;
 	loff_t isize = i_size_read(inode);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 
@@ -223,16 +223,11 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
 	nr_to_read = min(nr_to_read, max_pages);
 	while (nr_to_read) {
-		int err;
 		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;
 
 		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
-		err = __do_page_cache_readahead(mapping, filp,
-						offset, this_chunk, 0);
-		if (err < 0)
-			return err;
+		__do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);
 		offset += this_chunk;
 		nr_to_read -= this_chunk;
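As a sketch of the "implicit cast" callers the commit message refers to (shape recalled from the kernel sources of that era, not quoted from this diff), a helper like ra_submit() already returns the readahead count through an unsigned type, so the int-to-unsigned conversion was happening silently before this change:

	/* Hypothetical reconstruction of a caller pattern in mm/internal.h:
	 * the int return value was already folded into an unsigned long. */
	static inline unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping, struct file *filp)
	{
		return __do_page_cache_readahead(mapping, filp,
						ra->start, ra->size, ra->async_size);
	}

With the return type now unsigned int, such conversions are value-preserving by construction, and force_page_cache_readahead() above drops its unreachable "err < 0" branch.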