iomap: Copy larger chunks from userspace

If we have a large folio, we can copy in larger chunks than PAGE_SIZE.
Start at the maximum page cache size and shrink by half every time we
hit the "we are short on memory" problem.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
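For illustration, here is a minimal userspace-style sketch of the
shrink-by-half strategy described above. This is not the kernel code:
PAGE_SIZE and MAX_PAGECACHE_ORDER are given stand-in values, and
short_copy() / chunked_copy() are hypothetical helpers, with short_copy()
modelling a copy that may stop early when memory is tight.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE		4096u
#define MAX_PAGECACHE_ORDER	8	/* illustrative stand-in value */

/* Hypothetical copy primitive: may copy fewer bytes than requested. */
static size_t short_copy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return len;		/* a real atomic copy could return < len */
}

/* Copy 'count' bytes starting at file position 'pos', chunk by chunk. */
static size_t chunked_copy(void *dst, const void *src, size_t count,
			   size_t pos)
{
	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
	size_t done = 0;

	while (done < count) {
		/* Keep each copy within the current chunk boundary. */
		size_t offset = (pos + done) & (chunk - 1);
		size_t bytes = count - done;
		size_t copied;

		if (bytes > chunk - offset)
			bytes = chunk - offset;

		copied = short_copy((char *)dst + done,
				    (const char *)src + done, bytes);
		done += copied;

		/* Short copy: back off by halving the chunk, never below a page. */
		if (copied < bytes && chunk > PAGE_SIZE)
			chunk /= 2;
	}
	return done;
}

int main(void)
{
	static char src[2 * PAGE_SIZE] = "example payload";
	static char dst[2 * PAGE_SIZE];

	printf("copied %zu bytes\n", chunked_copy(dst, src, sizeof(src), 0));
	return 0;
}

As in the patch below, the copy window starts at the largest supported
folio size and only shrinks, never below PAGE_SIZE, when a short copy
suggests memory pressure.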
@@ -769,6 +769,7 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 {
 	loff_t length = iomap_length(iter);
+	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
 	loff_t pos = iter->pos;
 	ssize_t written = 0;
 	long status = 0;
@@ -777,15 +778,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 
 	do {
 		struct folio *folio;
-		struct page *page;
-		unsigned long offset;	/* Offset into pagecache page */
-		unsigned long bytes;	/* Bytes to write to page */
+		size_t offset;		/* Offset into folio */
+		size_t bytes;		/* Bytes to write to folio */
 		size_t copied;		/* Bytes copied from user */
 
-		offset = offset_in_page(pos);
-		bytes = min_t(unsigned long, PAGE_SIZE - offset,
-						iov_iter_count(i));
-again:
+		offset = pos & (chunk - 1);
+		bytes = min(chunk - offset, iov_iter_count(i));
 		status = balance_dirty_pages_ratelimited_flags(mapping,
 							       bdp_flags);
 		if (unlikely(status))
@@ -815,12 +813,14 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 		if (iter->iomap.flags & IOMAP_F_STALE)
 			break;
 
-		page = folio_file_page(folio, pos >> PAGE_SHIFT);
-		if (mapping_writably_mapped(mapping))
-			flush_dcache_page(page);
+		offset = offset_in_folio(folio, pos);
+		if (bytes > folio_size(folio) - offset)
+			bytes = folio_size(folio) - offset;
 
-		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
+		if (mapping_writably_mapped(mapping))
+			flush_dcache_folio(folio);
 
+		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
 		status = iomap_write_end(iter, pos, bytes, copied, folio);
 
 		if (unlikely(copied != status))
@@ -836,11 +836,13 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 			 */
 			if (copied)
 				bytes = copied;
-			goto again;
+			if (chunk > PAGE_SIZE)
+				chunk /= 2;
+		} else {
+			pos += status;
+			written += status;
+			length -= status;
 		}
-
-		pos += status;
-		written += status;
-		length -= status;
 	} while (iov_iter_count(i) && length);
 
 	if (status == -EAGAIN) {
...