Commit 807c6d09 authored by David Howells

netfs: Fix the loop that unmarks folios after writing to the cache

In the loop in netfs_rreq_unmark_after_write() that removes the PG_fscache
from folios after they've been written to the cache, as soon as we remove
the mark from a multipage folio, it can get split - and then we might see a
fragment of folio again.

Guard against this by advancing the 'unlocked' tracker to the index of the
last page in the folio to avoid a double removal of the PG_fscache mark.
Reported-by: Marc Dionne <marc.dionne@auristor.com>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
parent 92a714d7
@@ -698,6 +698,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
 end_wb:
 	if (folio_test_fscache(folio))
 		folio_end_fscache(folio);
+	xas_advance(&xas, folio_next_index(folio) - 1);
 	folio_end_writeback(folio);
 }
...
@@ -126,7 +126,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
 	 */
 	if (have_unlocked && folio_index(folio) <= unlocked)
 		continue;
-	unlocked = folio_index(folio);
+	unlocked = folio_next_index(folio) - 1;
 	trace_netfs_folio(folio, netfs_folio_trace_end_copy);
 	folio_end_fscache(folio);
 	have_unlocked = true;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment