Commit 1433b36e authored by David Hildenbrand, committed by Alexander Gordeev

s390/hugetlb: Convert PG_arch_1 code to work on folio->flags

Let's make it clearer that we are always working on folio flags and
never page flags of tail pages by converting remaining PG_arch_1 users
that modify page->flags to modify folio->flags instead.

No functional change intended, because we would always have worked with
the head page (where page->flags corresponds to folio->flags) and never
with tail pages.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20240508182955.358628-11-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 99b3f8f7
@@ -2733,7 +2733,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 {
 	pmd_t *pmd = (pmd_t *)pte;
 	unsigned long start, end;
-	struct page *page = pmd_page(*pmd);
+	struct folio *folio = page_folio(pmd_page(*pmd));
 
 	/*
 	 * The write check makes sure we do not set a key on shared
@@ -2748,7 +2748,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 	start = pmd_val(*pmd) & HPAGE_MASK;
 	end = start + HPAGE_SIZE;
 	__storage_key_init_range(start, end);
-	set_bit(PG_arch_1, &page->flags);
+	set_bit(PG_arch_1, &folio->flags);
 	cond_resched();
 	return 0;
 }
@@ -121,7 +121,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
 
 static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
 {
-	struct page *page;
+	struct folio *folio;
 	unsigned long size, paddr;
 
 	if (!mm_uses_skeys(mm) ||
@@ -129,16 +129,16 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
 		return;
 
 	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
-		page = pud_page(__pud(rste));
+		folio = page_folio(pud_page(__pud(rste)));
 		size = PUD_SIZE;
 		paddr = rste & PUD_MASK;
 	} else {
-		page = pmd_page(__pmd(rste));
+		folio = page_folio(pmd_page(__pmd(rste)));
 		size = PMD_SIZE;
 		paddr = rste & PMD_MASK;
 	}
 
-	if (!test_and_set_bit(PG_arch_1, &page->flags))
+	if (!test_and_set_bit(PG_arch_1, &folio->flags))
 		__storage_key_init_range(paddr, paddr + size);
 }
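For context, a minimal sketch of the idiom both hunks apply is shown below. It is illustrative only, assumes a kernel build context (struct folio, page_folio(), test_and_set_bit() and the s390 __storage_key_init_range() come from the kernel headers), and the helper name is hypothetical, not part of this commit:

/*
 * Hypothetical helper sketching the pattern used above: resolve the folio
 * that backs a huge mapping and record storage-key initialization in the
 * folio's flags. For a head page, folio->flags is the head page's flags
 * word, so this behaves exactly like the previous page->flags code.
 */
static void skey_init_huge_folio(struct folio *folio, unsigned long paddr,
				 unsigned long size)
{
	/* Initialize the storage keys only once per hugetlb folio. */
	if (!test_and_set_bit(PG_arch_1, &folio->flags))
		__storage_key_init_range(paddr, paddr + size);
}

In the hunks above, the folio is obtained with page_folio(pmd_page(*pmd)) or page_folio(pud_page(__pud(rste))) before the bit operation; switching from &page->flags to &folio->flags is the whole extent of the conversion.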