Commit 640ab98f authored by Jens Axboe

buffer: have alloc_page_buffers() use __GFP_NOFAIL

Instead of adding weird retry logic in that function, utilize
__GFP_NOFAIL to ensure that the VM takes care of handling any
potential retries appropriately. This means we don't have to
call free_more_memory() from here.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7beb2f84
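
The pattern in isolation: __GFP_NOFAIL makes the page allocator reclaim and retry internally rather than return NULL, so a caller that must not fail can drop its hand-rolled retry loop. A minimal illustrative sketch, not part of this patch (the helper name grab_page_nofail is made up):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *grab_page_nofail(void)
{
        /* May block while the VM reclaims, but never returns NULL. */
        struct page *page = alloc_page(GFP_NOFS | __GFP_NOFAIL);

        return page_address(page);      /* no NULL check needed */
}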
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
@@ -368,7 +368,7 @@ static int read_page(struct file *file, unsigned long index,
         pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
                  (unsigned long long)index << PAGE_SHIFT);
 
-        bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
+        bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
         if (!bh) {
                 ret = -ENOMEM;
                 goto out;
diff --git a/fs/buffer.c b/fs/buffer.c
@@ -861,16 +861,19 @@ int remove_inode_buffers(struct inode *inode)
  * which may not fail from ordinary buffer allocations.
  */
 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
-                int retry)
+                bool retry)
 {
         struct buffer_head *bh, *head;
+        gfp_t gfp = GFP_NOFS;
         long offset;
 
-try_again:
+        if (retry)
+                gfp |= __GFP_NOFAIL;
+
         head = NULL;
         offset = PAGE_SIZE;
         while ((offset -= size) >= 0) {
-                bh = alloc_buffer_head(GFP_NOFS);
+                bh = alloc_buffer_head(gfp);
                 if (!bh)
                         goto no_grow;
@@ -896,23 +899,7 @@ struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                 } while (head);
         }
 
-        /*
-         * Return failure for non-async IO requests.  Async IO requests
-         * are not allowed to fail, so we have to wait until buffer heads
-         * become available.  But we don't want tasks sleeping with
-         * partially complete buffers, so all were released above.
-         */
-        if (!retry)
-                return NULL;
-
-        /* We're _really_ low on memory. Now we just
-         * wait for old buffer heads to become free due to
-         * finishing IO.  Since this is an async request and
-         * the reserve list is empty, we're sure there are
-         * async buffer heads in use.
-         */
-        free_more_memory();
-        goto try_again;
+        return NULL;
 }
 EXPORT_SYMBOL_GPL(alloc_page_buffers);
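
For reference, this is how alloc_page_buffers() reads after the two hunks above, reassembled from the diff; the list-building and cleanup code between the hunks is unchanged by this patch and elided here:

struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                bool retry)
{
        struct buffer_head *bh, *head;
        gfp_t gfp = GFP_NOFS;
        long offset;

        /* Delegate retrying to the VM instead of looping locally. */
        if (retry)
                gfp |= __GFP_NOFAIL;

        head = NULL;
        offset = PAGE_SIZE;
        while ((offset -= size) >= 0) {
                bh = alloc_buffer_head(gfp);
                if (!bh)
                        goto no_grow;
                /* ... link bh into the list and set it up (unchanged, elided) ... */
        }
        return head;

no_grow:
        /* ... release any partially built list (unchanged, elided) ... */
        return NULL;
}

Note that the failure path survives: with retry == false, alloc_buffer_head() can still return NULL and the function still reports that by returning NULL itself; only the retry-by-hand logic is gone.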
@@ -1021,7 +1008,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
         /*
          * Allocate some buffers for this page
          */
-        bh = alloc_page_buffers(page, size, 0);
+        bh = alloc_page_buffers(page, size, false);
         if (!bh)
                 goto failed;
@@ -1575,7 +1562,7 @@ void create_empty_buffers(struct page *page,
 {
         struct buffer_head *bh, *head, *tail;
 
-        head = alloc_page_buffers(page, blocksize, 1);
+        head = alloc_page_buffers(page, blocksize, true);
         bh = head;
         do {
                 bh->b_state |= b_state;
@@ -2638,7 +2625,7 @@ int nobh_write_begin(struct address_space *mapping,
          * Be careful: the buffer linked list is a NULL terminated one, rather
          * than the circular one we're used to.
          */
-        head = alloc_page_buffers(page, blocksize, 0);
+        head = alloc_page_buffers(page, blocksize, false);
         if (!head) {
                 ret = -ENOMEM;
                 goto out_release;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
@@ -1739,7 +1739,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
         spin_lock(&mapping->private_lock);
         if (unlikely(!page_has_buffers(page))) {
                 spin_unlock(&mapping->private_lock);
-                bh = head = alloc_page_buffers(page, bh_size, 1);
+                bh = head = alloc_page_buffers(page, bh_size, true);
                 spin_lock(&mapping->private_lock);
                 if (likely(!page_has_buffers(page))) {
                         struct buffer_head *tail;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
@@ -507,7 +507,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
         if (unlikely(!page_has_buffers(page))) {
                 struct buffer_head *tail;
 
-                bh = head = alloc_page_buffers(page, blocksize, 1);
+                bh = head = alloc_page_buffers(page, blocksize, true);
                 do {
                         set_buffer_uptodate(bh);
                         tail = bh;
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
@@ -156,7 +156,7 @@ void set_bh_page(struct buffer_head *bh,
                 struct page *page, unsigned long offset);
 int try_to_free_buffers(struct page *);
 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
-                int retry);
+                bool retry);
 void create_empty_buffers(struct page *, unsigned long,
                 unsigned long b_state);
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
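
A hedged usage sketch of the new prototype (the caller attach_buffers() is hypothetical): with retry == true the __GFP_NOFAIL path guarantees a non-NULL result, which the create_empty_buffers() and NTFS call sites above rely on; with retry == false the caller must still handle NULL, as nobh_write_begin() and grow_dev_page() do:

#include <linux/buffer_head.h>
#include <linux/errno.h>

static int attach_buffers(struct page *page, unsigned long blocksize,
                          bool must_succeed)
{
        struct buffer_head *head;

        if (must_succeed) {
                /* retry == true: allocation cannot fail, no check needed */
                head = alloc_page_buffers(page, blocksize, true);
        } else {
                /* retry == false: allocation may fail under memory pressure */
                head = alloc_page_buffers(page, blocksize, false);
                if (!head)
                        return -ENOMEM;
        }
        /* ... attach 'head' to the page, as the call sites above do ... */
        return 0;
}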