Commit 036c0e10 authored by David Hildenbrand, committed by Alexander Gordeev

s390/uv: Convert PG_arch_1 users to only work on small folios

Now that make_folio_secure() may only set PG_arch_1 for small folios,
let's convert relevant remaining UV code to only work on (small) folios
and simply reject large folios early. This way, we'll never end up
touching PG_arch_1 on tail pages of a large folio in UV code.

The folio_get()/folio_put() for functions that are documented to already
hold a folio reference look weird; likely they are required to make
concurrent gmap_make_secure() back off because the caller might only hold
an implicit reference due to the page mapping. So leave that alone for now.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20240508182955.358628-5-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent eef88fe4
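
The pattern applied throughout the diff below, shown here as a minimal standalone sketch (the helper name uv_clear_arch_1() is hypothetical and not part of the commit): map the page to its folio, bail out early for large folios, and only then touch PG_arch_1, which always lives in the flags word of the folio's head page.

/* Illustration only -- hypothetical helper, not part of this commit. */
static void uv_clear_arch_1(struct page *page)
{
        struct folio *folio = page_folio(page);

        /* See gmap_make_secure(): large folios cannot be secure */
        if (folio_test_large(folio))
                return;

        /*
         * &folio->flags is the head page's flags word, so tail pages
         * of a large folio are never touched here.
         */
        clear_bit(PG_arch_1, &folio->flags);
}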
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -247,7 +247,9 @@ static inline unsigned long __phys_addr(unsigned long x, bool is_31bit)
 #define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
 
 #define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
+#define phys_to_folio(phys) page_folio(phys_to_page(phys))
 #define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define folio_to_phys(folio) pfn_to_phys(folio_pfn(folio))
 
 static inline void *pfn_to_virt(unsigned long pfn)
 {
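A hedged usage sketch for the two new helpers above (the function below is hypothetical, not in the patch): phys_to_folio() resolves a physical address to the folio containing it, and folio_to_phys() returns the physical address of that folio's first page, so for an order-0 folio the round trip just masks off the in-page offset.

/* Illustration only -- hypothetical helper, not part of this commit. */
static unsigned long folio_base_phys(unsigned long paddr)
{
        struct folio *folio = phys_to_folio(paddr);

        /* For a small (order-0) folio this equals paddr & PAGE_MASK. */
        return folio_to_phys(folio);
}
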
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -135,14 +135,18 @@ static int uv_destroy_page(unsigned long paddr)
  */
 int uv_destroy_owned_page(unsigned long paddr)
 {
-        struct page *page = phys_to_page(paddr);
+        struct folio *folio = phys_to_folio(paddr);
         int rc;
 
-        get_page(page);
+        /* See gmap_make_secure(): large folios cannot be secure */
+        if (unlikely(folio_test_large(folio)))
+                return 0;
+
+        folio_get(folio);
         rc = uv_destroy_page(paddr);
         if (!rc)
-                clear_bit(PG_arch_1, &page->flags);
-        put_page(page);
+                clear_bit(PG_arch_1, &folio->flags);
+        folio_put(folio);
         return rc;
 }
@@ -170,14 +174,18 @@ int uv_convert_from_secure(unsigned long paddr)
  */
 int uv_convert_owned_from_secure(unsigned long paddr)
 {
-        struct page *page = phys_to_page(paddr);
+        struct folio *folio = phys_to_folio(paddr);
         int rc;
 
-        get_page(page);
+        /* See gmap_make_secure(): large folios cannot be secure */
+        if (unlikely(folio_test_large(folio)))
+                return 0;
+
+        folio_get(folio);
         rc = uv_convert_from_secure(paddr);
         if (!rc)
-                clear_bit(PG_arch_1, &page->flags);
-        put_page(page);
+                clear_bit(PG_arch_1, &folio->flags);
+        folio_put(folio);
         return rc;
 }
@@ -479,33 +487,34 @@ EXPORT_SYMBOL_GPL(gmap_destroy_page);
  */
 int arch_make_page_accessible(struct page *page)
 {
+        struct folio *folio = page_folio(page);
         int rc = 0;
 
-        /* Hugepage cannot be protected, so nothing to do */
-        if (PageHuge(page))
+        /* See gmap_make_secure(): large folios cannot be secure */
+        if (unlikely(folio_test_large(folio)))
                 return 0;
 
         /*
          * PG_arch_1 is used in 3 places:
          * 1. for kernel page tables during early boot
          * 2. for storage keys of huge pages and KVM
-         * 3. As an indication that this page might be secure. This can
+         * 3. As an indication that this small folio might be secure. This can
          *    overindicate, e.g. we set the bit before calling
          *    convert_to_secure.
          * As secure pages are never huge, all 3 variants can co-exists.
          */
-        if (!test_bit(PG_arch_1, &page->flags))
+        if (!test_bit(PG_arch_1, &folio->flags))
                 return 0;
 
-        rc = uv_pin_shared(page_to_phys(page));
+        rc = uv_pin_shared(folio_to_phys(folio));
         if (!rc) {
-                clear_bit(PG_arch_1, &page->flags);
+                clear_bit(PG_arch_1, &folio->flags);
                 return 0;
         }
 
-        rc = uv_convert_from_secure(page_to_phys(page));
+        rc = uv_convert_from_secure(folio_to_phys(folio));
         if (!rc) {
-                clear_bit(PG_arch_1, &page->flags);
+                clear_bit(PG_arch_1, &folio->flags);
                 return 0;
         }