Commit 9aac777a authored by Matthew Wilcox (Oracle), committed by Anna Schumaker

filemap: Convert generic_perform_write() to support large folios

Modelled after the loop in iomap_write_iter(), copy larger chunks from
userspace if the filesystem has created large folios.

[hch: use mapping_max_folio_size to keep supporting file systems that do
 not support large folios]
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Tested-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 146a99ae
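
The loop pattern adopted here can be summarised as: size each copy to the largest folio the mapping supports, mask the file position against that chunk size to stay aligned, and halve the chunk whenever the copy is rejected, so the worst case degrades back to the old single-page behaviour. The userspace sketch below is only a model of that control flow under simplified assumptions; try_copy(), chunked_write() and the 16-page rejection threshold are illustrative stand-ins, not kernel API, and error handling is omitted. The real change is the diff that follows.

/* Illustrative userspace model of the chunk-sizing/fallback loop; not kernel code. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL

/*
 * Stand-in for the write_begin/copy/write_end sequence: pretend any copy
 * larger than 16 pages is rejected, so the halving path gets exercised.
 */
static size_t try_copy(char *dst, const char *src, size_t bytes)
{
	if (bytes > 16 * PAGE_SIZE)
		return 0;			/* short copy: caller retries */
	memcpy(dst, src, bytes);
	return bytes;
}

static size_t chunked_write(char *dst, const char *src, size_t count,
			    size_t max_chunk)
{
	size_t chunk = max_chunk;		/* cf. mapping_max_folio_size() */
	size_t pos = 0, written = 0;

	while (count) {
		size_t offset = pos & (chunk - 1);	/* offset within chunk */
		size_t bytes = count < chunk - offset ? count : chunk - offset;
		size_t copied = try_copy(dst + pos, src + pos, bytes);

		if (copied == 0) {
			/* Rejected: shrink the chunk, but never below one page. */
			if (chunk > PAGE_SIZE)
				chunk /= 2;
			continue;
		}
		pos += copied;
		written += copied;
		count -= copied;
	}
	return written;
}

int main(void)
{
	static char src[1 << 20], dst[1 << 20];

	memset(src, 'x', sizeof(src));
	printf("wrote %zu bytes\n",
	       chunked_write(dst, src, sizeof(src), 64 * PAGE_SIZE));
	return 0;
}

With a 1 MiB buffer and the stand-in rejecting anything above 16 pages, the sketch starts copying in 256 KiB chunks and settles at 64 KiB, which is the same degrade-gracefully behaviour the "chunk /= 2" hunk below gives the kernel loop.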
@@ -3981,21 +3981,24 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 	loff_t pos = iocb->ki_pos;
 	struct address_space *mapping = file->f_mapping;
 	const struct address_space_operations *a_ops = mapping->a_ops;
+	size_t chunk = mapping_max_folio_size(mapping);
 	long status = 0;
 	ssize_t written = 0;
 
 	do {
 		struct page *page;
-		unsigned long offset;	/* Offset into pagecache page */
-		unsigned long bytes;	/* Bytes to write to page */
+		struct folio *folio;
+		size_t offset;		/* Offset into folio */
+		size_t bytes;		/* Bytes to write to folio */
 		size_t copied;		/* Bytes copied from user */
 		void *fsdata = NULL;
 
-		offset = (pos & (PAGE_SIZE - 1));
-		bytes = min_t(unsigned long, PAGE_SIZE - offset,
-						iov_iter_count(i));
+		bytes = iov_iter_count(i);
+retry:
+		offset = pos & (chunk - 1);
+		bytes = min(chunk - offset, bytes);
+		balance_dirty_pages_ratelimited(mapping);
 
-again:
 		/*
 		 * Bring in the user page that we will copy from _first_.
 		 * Otherwise there's a nasty deadlock on copying from the
@@ -4017,11 +4020,16 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 		if (unlikely(status < 0))
 			break;
 
+		folio = page_folio(page);
+		offset = offset_in_folio(folio, pos);
+		if (bytes > folio_size(folio) - offset)
+			bytes = folio_size(folio) - offset;
+
 		if (mapping_writably_mapped(mapping))
-			flush_dcache_page(page);
+			flush_dcache_folio(folio);
 
-		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
-		flush_dcache_page(page);
+		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
+		flush_dcache_folio(folio);
 
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
 						page, fsdata);
@@ -4039,14 +4047,16 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
 			 * halfway through, might be a race with munmap,
 			 * might be severe memory pressure.
 			 */
-			if (copied)
+			if (chunk > PAGE_SIZE)
+				chunk /= 2;
+			if (copied) {
 				bytes = copied;
-			goto again;
-		}
-		pos += status;
-		written += status;
-
-		balance_dirty_pages_ratelimited(mapping);
+				goto retry;
+			}
+		} else {
+			pos += status;
+			written += status;
+		}
 	} while (iov_iter_count(i));
 
 	if (!written)