Commit 59d0d52c authored by Linus Torvalds

Merge tag 'netfs-fixes-20221115' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull netfs fixes from David Howells:
 "Two fixes, affecting the functions that iterate over the pagecache
  unmarking or unlocking pages after an op is complete:

   - xas_for_each() loops must call xas_retry() first thing and
     immediately do a "continue" in the case that the extracted value is
     a special value that indicates that the walk raced with a
     modification. Fix the unlock and unmark loops to do this.

   - The maths in the unlock loop is dodgy as it could, theoretically,
     at some point in the future end up with a starting file pointer
     that is in the middle of a folio. This will cause a subtraction to
     go negative - but the number is unsigned. Fix the maths to use
     absolute file positions instead of relative page indices"

* tag 'netfs-fixes-20221115' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  netfs: Fix dodgy maths
  netfs: Fix missing xas_retry() calls in xarray iteration
parents 81e7cfa3 5e51c627
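
For context, here is a minimal sketch (not code from this commit) of the iteration rule the first fix enforces: every xas_for_each() walk must check xas_retry() on the entry before using it, because the XArray hands back a special retry entry when the walk races with a concurrent modification. The 'mapping', 'start_page' and 'last_page' names are assumed stand-ins for whatever the caller has in scope.

	XA_STATE(xas, &mapping->i_pages, start_page);
	struct folio *folio;

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		if (xas_retry(&xas, folio))
			continue;	/* raced with a store; re-examine this slot */

		/* ... it is now safe to operate on 'folio' ... */
	}
	rcu_read_unlock();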
@@ -17,9 +17,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 {
 	struct netfs_io_subrequest *subreq;
 	struct folio *folio;
-	unsigned int iopos, account = 0;
 	pgoff_t start_page = rreq->start / PAGE_SIZE;
 	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+	size_t account = 0;
 	bool subreq_failed = false;

 	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
@@ -39,18 +39,23 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 	 */
 	subreq = list_first_entry(&rreq->subrequests,
 				  struct netfs_io_subrequest, rreq_link);
-	iopos = 0;
 	subreq_failed = (subreq->error < 0);

 	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

 	rcu_read_lock();
 	xas_for_each(&xas, folio, last_page) {
-		unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-		unsigned int pgend = pgpos + folio_size(folio);
+		loff_t pg_end;
 		bool pg_failed = false;

+		if (xas_retry(&xas, folio))
+			continue;
+
+		pg_end = folio_pos(folio) + folio_size(folio) - 1;
+
 		for (;;) {
+			loff_t sreq_end;
+
 			if (!subreq) {
 				pg_failed = true;
 				break;
@@ -58,11 +63,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
 				folio_start_fscache(folio);
 			pg_failed |= subreq_failed;
-			if (pgend < iopos + subreq->len)
+			sreq_end = subreq->start + subreq->len - 1;
+			if (pg_end < sreq_end)
 				break;

 			account += subreq->transferred;
-			iopos += subreq->len;
 			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
 				subreq = list_next_entry(subreq, rreq_link);
 				subreq_failed = (subreq->error < 0);
@@ -70,7 +75,8 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 				subreq = NULL;
 				subreq_failed = false;
 			}
-			if (pgend == iopos)
+
+			if (pg_end == sreq_end)
 				break;
 		}
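
To see why the old relative-index maths was dodgy: if rreq->start ever lands in the middle of a large folio, folio_index(folio) is smaller than start_page, and the subtraction wraps because pgoff_t is unsigned. A hypothetical illustration with invented values:

	pgoff_t start_page = 5;		/* rreq->start / PAGE_SIZE, mid-folio */
	pgoff_t folio_idx  = 4;		/* folio_index() of the enclosing folio */
	unsigned int pgpos = (folio_idx - start_page) * PAGE_SIZE;
					/* wraps to ~4 GiB instead of -4096 */

The fixed code avoids the subtraction entirely by comparing absolute byte positions: folio_pos(folio) + folio_size(folio) - 1 against subreq->start + subreq->len - 1.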
@@ -121,6 +121,9 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
 	XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

 	xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+		if (xas_retry(&xas, folio))
+			continue;
+
 		/* We might have multiple writes from the same huge
 		 * folio, but we mustn't unlock a folio more than once.
 		 */