Commit fb724159 authored by Baolin Wang, committed by Andrew Morton

mm: filemap: use xa_get_order() to get the swap entry order

In the following patches, shmem will support swapping out large folios,
which means the shmem mappings may contain large order swap entries, so
use xa_get_order() to get the folio order of the shmem swap entry to
update '*start' correctly.
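
As a rough illustration of the bookkeeping (not part of the patch; the helper name below is made up): a swap entry stored as an order-N multi-index entry covers 2^N consecutive indices, so the next batch has to start at the first index past that whole aligned range, which is what round_down(index + nr, nr) computes.

	/*
	 * Illustrative sketch only: find the first page index after the
	 * (possibly large) swap entry that covers 'index'.
	 */
	static pgoff_t next_index_after_entry(struct address_space *mapping,
					      pgoff_t index)
	{
		unsigned long nr = 1 << xa_get_order(&mapping->i_pages, index);

		/* e.g. an order-2 entry covers indices 4..7; index 5 yields 8 */
		return round_down(index + nr, nr);
	}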

[hughd@google.com: use xa_get_order() to get the swap entry order]
  Link: https://lkml.kernel.org/r/c336e6e4-da7f-b714-c0f1-12df715f2611@google.com
Link: https://lkml.kernel.org/r/6876d55145c1cc80e79df7884aa3a62e397b101d.1723434324.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: Daniel Gomez <da.gomez@samsung.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6ea0d1cc
@@ -2046,17 +2046,20 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
 		if (!folio_batch_add(fbatch, folio))
 			break;
 	}
-	rcu_read_unlock();
 
 	if (folio_batch_count(fbatch)) {
-		unsigned long nr = 1;
+		unsigned long nr;
 		int idx = folio_batch_count(fbatch) - 1;
 
 		folio = fbatch->folios[idx];
 		if (!xa_is_value(folio))
 			nr = folio_nr_pages(folio);
-		*start = indices[idx] + nr;
+		else
+			nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
+		*start = round_down(indices[idx] + nr, nr);
 	}
+	rcu_read_unlock();
+
 	return folio_batch_count(fbatch);
 }
 
@@ -2088,10 +2091,17 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 
 	rcu_read_lock();
 	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
+		unsigned long base;
+		unsigned long nr;
+
 		if (!xa_is_value(folio)) {
-			if (folio->index < *start)
+			nr = folio_nr_pages(folio);
+			base = folio->index;
+			/* Omit large folio which begins before the start */
+			if (base < *start)
 				goto put;
-			if (folio_next_index(folio) - 1 > end)
+			/* Omit large folio which extends beyond the end */
+			if (base + nr - 1 > end)
 				goto put;
 			if (!folio_trylock(folio))
 				goto put;
@@ -2100,7 +2110,19 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 				goto unlock;
 			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
 					folio);
+		} else {
+			nr = 1 << xa_get_order(&mapping->i_pages, xas.xa_index);
+			base = xas.xa_index & ~(nr - 1);
+			/* Omit order>0 value which begins before the start */
+			if (base < *start)
+				continue;
+			/* Omit order>0 value which extends beyond the end */
+			if (base + nr - 1 > end)
+				break;
 		}
+
+		/* Update start now so that last update is correct on return */
+		*start = base + nr;
 		indices[fbatch->nr] = xas.xa_index;
 		if (!folio_batch_add(fbatch, folio))
 			break;
@@ -2112,15 +2134,6 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 	}
 	rcu_read_unlock();
 
-	if (folio_batch_count(fbatch)) {
-		unsigned long nr = 1;
-		int idx = folio_batch_count(fbatch) - 1;
-
-		folio = fbatch->folios[idx];
-		if (!xa_is_value(folio))
-			nr = folio_nr_pages(folio);
-		*start = indices[idx] + nr;
-	}
 	return folio_batch_count(fbatch);
 }
 
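
For reference, a rough worked example of the new find_lock_entries() bookkeeping (numbers and the snippet are illustrative, not kernel code): if an order-2 value entry is found at xas.xa_index == 6, then nr = 4, the aligned base is 6 & ~3 = 4, the entry covers indices 4..7, and '*start' for the next batch becomes 4 + 4 = 8; entries whose aligned range starts before '*start' or ends past 'end' are skipped before they reach the batch.

	/* Illustrative only: aligned range covered by an order>0 value entry */
	unsigned long nr = 1 << xa_get_order(&mapping->i_pages, xas.xa_index);
	unsigned long base = xas.xa_index & ~(nr - 1);	/* 6 & ~3 == 4 */
	pgoff_t next_start = base + nr;			/* 4 + 4 == 8  */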