Commit d35c34bb authored by Matthew Wilcox (Oracle), committed by Alexander Gordeev

s390/mm: Convert gmap_make_secure to use a folio

Remove uses of deprecated page APIs, and move the check for large
folios out of make_folio_secure() into its caller gmap_make_secure(),
so that the folio lock is not taken at all if the folio is too large.
We could do better by attempting to split the large folio, but I'll
leave that improvement for someone who can test it.

Acked-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://lore.kernel.org/r/20240322161149.2327518-3-willy@infradead.org
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 259e660d
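
The reordering described in the message is the interesting part of the change.
For reference, here is the patched locking path in gmap_make_secure(),
condensed from the hunks below with explanatory comments added; the
surrounding retry loop, PTE-lock setup, and the unlock/out labels are elided:

	folio = page_folio(pte_page(*ptep));

	/* Reject large folios before taking the folio lock.  Before this
	 * patch, make_page_secure() returned -EINVAL for a large folio
	 * only after the folio lock had already been taken. */
	rc = -EINVAL;
	if (folio_test_large(folio))
		goto unlock;

	rc = -EAGAIN;
	if (folio_trylock(folio)) {
		if (should_export_before_import(uvcb, gmap->mm))
			uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
		rc = make_folio_secure(folio, uvcb);
		folio_unlock(folio);
	}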
@@ -202,13 +202,10 @@ static int expected_folio_refs(struct folio *folio)
 	return res;
 }
 
-static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
+static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
 {
-	struct folio *folio = page_folio(page);
 	int expected, cc = 0;
 
-	if (folio_test_large(folio))
-		return -EINVAL;
 	if (folio_test_writeback(folio))
 		return -EAGAIN;
 	expected = expected_folio_refs(folio);
@@ -281,7 +278,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 	bool local_drain = false;
 	spinlock_t *ptelock;
 	unsigned long uaddr;
-	struct page *page;
+	struct folio *folio;
 	pte_t *ptep;
 	int rc;
 
@@ -310,15 +307,19 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 	if (!ptep)
 		goto out;
 	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
-		page = pte_page(*ptep);
+		folio = page_folio(pte_page(*ptep));
+		rc = -EINVAL;
+		if (folio_test_large(folio))
+			goto unlock;
 		rc = -EAGAIN;
-		if (trylock_page(page)) {
+		if (folio_trylock(folio)) {
 			if (should_export_before_import(uvcb, gmap->mm))
-				uv_convert_from_secure(page_to_phys(page));
-			rc = make_page_secure(page, uvcb);
-			unlock_page(page);
+				uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
+			rc = make_folio_secure(folio, uvcb);
+			folio_unlock(folio);
 		}
 	}
+unlock:
 	pte_unmap_unlock(ptep, ptelock);
 out:
 	mmap_read_unlock(gmap->mm);
@@ -328,10 +329,10 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 		 * If we are here because the UVC returned busy or partial
 		 * completion, this is just a useless check, but it is safe.
 		 */
-		wait_on_page_writeback(page);
+		folio_wait_writeback(folio);
 	} else if (rc == -EBUSY) {
 		/*
-		 * If we have tried a local drain and the page refcount
+		 * If we have tried a local drain and the folio refcount
 		 * still does not match our expected safe value, try with a
 		 * system wide drain. This is needed if the pagevecs holding
 		 * the page are on a different CPU.
@@ -342,7 +343,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 			return -EAGAIN;
 		}
 		/*
-		 * We are here if the page refcount does not match the
+		 * We are here if the folio refcount does not match the
 		 * expected safe value. The main culprits are usually
 		 * pagevecs. With lru_add_drain() we drain the pagevecs
 		 * on the local CPU so that hopefully the refcount will
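
On the improvement the message leaves open: a minimal, untested sketch of what
attempting to split the large folio might look like, assuming the generic
split_folio() helper is usable in this context. The helper name
gmap_try_split_folio() and its calling convention (the caller takes a folio
reference under the PTE lock, then drops that lock before calling, since
split_folio() can sleep) are assumptions of this sketch, not part of the
commit:

	/* Hypothetical, untested: try to split a large folio so the caller
	 * can retry and find small folios mapped at the address, instead of
	 * failing with -EINVAL.  The caller must hold a folio reference and
	 * must not hold the PTE spinlock, because split_folio() can sleep.
	 */
	static int gmap_try_split_folio(struct folio *folio)
	{
		int rc = -EAGAIN;	/* ask the caller to retry by default */

		if (folio_trylock(folio)) {
			if (split_folio(folio))	/* returns 0 on success */
				rc = -EINVAL;	/* unsplittable: give up */
			folio_unlock(folio);
		}
		return rc;
	}

As the commit message says, this would need testing on s390 before it could
replace the -EINVAL bail-out above.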