Commit 68ad4743 authored by David Hildenbrand, committed by Alexander Gordeev

s390/uv: gmap_make_secure() cleanups for further changes

Let's factor out handling of LRU cache draining and convert the if-else
chain to a switch-case.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20240508182955.358628-3-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 3f29f653
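
Before the diff itself, here is a minimal, self-contained sketch of the same shape of change described in the commit message: the LRU-drain retry policy is factored into a small helper, and the caller dispatches on the return code with a switch instead of an if-else chain. Everything in this sketch (drain_once(), make_secure_once(), the simplified error handling) is an illustrative stand-in, not the kernel's actual code; the real drain_lru() and gmap_make_secure() changes are in the diff below.

#include <errno.h>
#include <stdbool.h>

/* Illustrative helper: drain once on the first failure and ask the caller to
 * retry immediately; on the second failure, tell the caller to give up. */
static bool drain_once(bool *drain_called)
{
	if (*drain_called)
		return false;		/* already drained, don't retry again */
	*drain_called = true;		/* pretend to drain the local caches */
	return true;			/* caller should retry immediately */
}

/* Hypothetical operation standing in for the real UVC call: busy on the
 * first attempt, successful afterwards. */
static int make_secure_once(int *attempts)
{
	return (*attempts)++ == 0 ? -EBUSY : 0;
}

static int make_secure(void)
{
	bool drain_called = false;
	int attempts = 0;
	int rc;

again:
	rc = make_secure_once(&attempts);

	switch (rc) {
	case -EBUSY:
		/* Extra references: drain, retry once, then back off. */
		if (drain_once(&drain_called))
			goto again;
		return -EAGAIN;
	default:
		return rc;
	}
}
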
@@ -266,6 +266,36 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str
 	return atomic_read(&mm->context.protected_count) > 1;
 }
 
+/*
+ * Drain LRU caches: the local one on first invocation and the ones of all
+ * CPUs on successive invocations. Returns "true" on the first invocation.
+ */
+static bool drain_lru(bool *drain_lru_called)
+{
+	/*
+	 * If we have tried a local drain and the folio refcount
+	 * still does not match our expected safe value, try with a
+	 * system wide drain. This is needed if the pagevecs holding
+	 * the page are on a different CPU.
+	 */
+	if (*drain_lru_called) {
+		lru_add_drain_all();
+		/* We give up here, don't retry immediately. */
+		return false;
+	}
+	/*
+	 * We are here if the folio refcount does not match the
+	 * expected safe value. The main culprits are usually
+	 * pagevecs. With lru_add_drain() we drain the pagevecs
+	 * on the local CPU so that hopefully the refcount will
+	 * reach the expected safe value.
+	 */
+	lru_add_drain();
+	*drain_lru_called = true;
+	/* The caller should try again immediately */
+	return true;
+}
+
 /*
  * Requests the Ultravisor to make a page accessible to a guest.
  * If it's brought in the first time, it will be cleared. If
@@ -275,7 +305,7 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str
 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 {
 	struct vm_area_struct *vma;
-	bool local_drain = false;
+	bool drain_lru_called = false;
 	spinlock_t *ptelock;
 	unsigned long uaddr;
 	struct folio *folio;
@@ -331,37 +361,21 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 out:
 	mmap_read_unlock(gmap->mm);
 
-	if (rc == -EAGAIN) {
+	switch (rc) {
+	case -EAGAIN:
 		/*
 		 * If we are here because the UVC returned busy or partial
 		 * completion, this is just a useless check, but it is safe.
 		 */
 		folio_wait_writeback(folio);
 		folio_put(folio);
-	} else if (rc == -EBUSY) {
-		/*
-		 * If we have tried a local drain and the folio refcount
-		 * still does not match our expected safe value, try with a
-		 * system wide drain. This is needed if the pagevecs holding
-		 * the page are on a different CPU.
-		 */
-		if (local_drain) {
-			lru_add_drain_all();
-			/* We give up here, and let the caller try again */
-			return -EAGAIN;
-		}
-		/*
-		 * We are here if the folio refcount does not match the
-		 * expected safe value. The main culprits are usually
-		 * pagevecs. With lru_add_drain() we drain the pagevecs
-		 * on the local CPU so that hopefully the refcount will
-		 * reach the expected safe value.
-		 */
-		lru_add_drain();
-		local_drain = true;
-		/* And now we try again immediately after draining */
-		goto again;
-	} else if (rc == -ENXIO) {
+		return -EAGAIN;
+	case -EBUSY:
+		/* Additional folio references. */
+		if (drain_lru(&drain_lru_called))
+			goto again;
+		return -EAGAIN;
+	case -ENXIO:
 		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
 			return -EFAULT;
 		return -EAGAIN;
...