Commit ce415712 authored by Dominik Dingel, committed by Linus Torvalds

s390/hugetlb: remove dead code for sw emulated huge pages

We now support hugepages only on hardware with EDAT1. So we remove the
prepare/release_hugepage hooks and simplify set_huge_pte_at and
huge_ptep_get.
Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 08bd4fc1
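
For readers of the diff below, some context on what is being deleted: on machines without EDAT1, s390 emulated huge pages in software. arch_prepare_hugepage() allocated a shadow pte table covering the 1 MB segment and stashed its address in page[1].index, and set_huge_pte_at() stored that table's origin in the segment entry instead of setting _SEGMENT_ENTRY_LARGE. The following is a minimal stand-alone sketch of the two store paths, not kernel code; the types, bit values, and helper names are simplified stand-ins invented for this illustration (the real definitions live in arch/s390/include/asm/pgtable.h).

#include <stdio.h>

/* Simplified stand-ins, not the real s390 definitions (those live in
 * arch/s390/include/asm/pgtable.h). */
#define _SEGMENT_ENTRY_ORIGIN (~0x7ffUL)  /* origin mask (illustrative) */
#define _SEGMENT_ENTRY_LARGE  0x0400UL    /* EDAT1 large-page bit (illustrative) */

typedef struct { unsigned long val; } pmd_t;

/* Hardware path (the one that remains): the segment entry points at the
 * 1 MB frame itself and carries the large-page bit, so the hardware
 * walker stops at segment level. */
static pmd_t huge_pmd_hw(unsigned long frame)
{
        return (pmd_t){ (frame & _SEGMENT_ENTRY_ORIGIN) | _SEGMENT_ENTRY_LARGE };
}

/* Emulated path (deleted by this commit): without EDAT1 the segment
 * entry cannot be marked large, so it pointed at a pre-built shadow pte
 * table that mapped the same 1 MB range page by page; that table's
 * address was kept in page[1].index of the compound page. */
static pmd_t huge_pmd_emulated(unsigned long shadow_table_origin)
{
        return (pmd_t){ shadow_table_origin & _SEGMENT_ENTRY_ORIGIN };
}

int main(void)
{
        printf("hw entry:       %#lx\n", huge_pmd_hw(0x100000UL).val);
        printf("emulated entry: %#lx\n", huge_pmd_emulated(0x200000UL).val);
        return 0;
}

With EDAT1 guaranteed, only the first path survives, which is why the MACHINE_HAS_HPAGE branches in the diff below can go.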
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -37,9 +37,6 @@ static inline int prepare_hugepage_range(struct file *file,
 
 #define arch_clear_hugepage_flags(page)        do { } while (0)
 
-int arch_prepare_hugepage(struct page *page);
-void arch_release_hugepage(struct page *page);
-
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep)
 {
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -86,31 +86,16 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t pte)
 {
-        pmd_t pmd;
+        pmd_t pmd = __pte_to_pmd(pte);
 
-        pmd = __pte_to_pmd(pte);
-        if (!MACHINE_HAS_HPAGE) {
-                /* Emulated huge ptes loose the dirty and young bit */
-                pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
-                pmd_val(pmd) |= pte_page(pte)[1].index;
-        } else
-                pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
         *(pmd_t *) ptep = pmd;
 }
 
 pte_t huge_ptep_get(pte_t *ptep)
 {
-        unsigned long origin;
-        pmd_t pmd;
+        pmd_t pmd = *(pmd_t *) ptep;
 
-        pmd = *(pmd_t *) ptep;
-        if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
-                origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
-                pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
-                pmd_val(pmd) |= *(unsigned long *) origin;
-                /* Emulated huge ptes are young and dirty by definition */
-                pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
-        }
         return __pmd_to_pte(pmd);
 }
 
@@ -125,45 +110,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
         return pte;
 }
 
-int arch_prepare_hugepage(struct page *page)
-{
-        unsigned long addr = page_to_phys(page);
-        pte_t pte;
-        pte_t *ptep;
-        int i;
-
-        if (MACHINE_HAS_HPAGE)
-                return 0;
-
-        ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
-        if (!ptep)
-                return -ENOMEM;
-
-        pte_val(pte) = addr;
-        for (i = 0; i < PTRS_PER_PTE; i++) {
-                set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
-                pte_val(pte) += PAGE_SIZE;
-        }
-        page[1].index = (unsigned long) ptep;
-        return 0;
-}
-
-void arch_release_hugepage(struct page *page)
-{
-        pte_t *ptep;
-
-        if (MACHINE_HAS_HPAGE)
-                return;
-
-        ptep = (pte_t *) page[1].index;
-        if (!ptep)
-                return;
-        clear_table((unsigned long *) ptep, _PAGE_INVALID,
-                    PTRS_PER_PTE * sizeof(pte_t));
-        page_table_free(&init_mm, (unsigned long *) ptep);
-        page[1].index = 0;
-}
-
 pte_t *huge_pte_alloc(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
 {
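
As a sanity check on the simplified helpers above, here is a hedged round-trip sketch against mock types: store a huge pte, read it back, and confirm the only change is the large-page bit. __pte_to_pmd()/__pmd_to_pte() are modeled as identity conversions, which glosses over the real bit remapping those helpers perform in hugetlbpage.c; only the shape of the new set_huge_pte_at()/huge_ptep_get() logic is reproduced, and the mm/addr parameters are dropped.

#include <assert.h>
#include <stdio.h>

#define _SEGMENT_ENTRY_LARGE 0x0400UL  /* illustrative value, not the real bit */

typedef struct { unsigned long val; } pmd_t;
typedef struct { unsigned long val; } pte_t;

/* Identity stand-ins; the real helpers remap individual bits. */
static pmd_t __pte_to_pmd(pte_t pte) { return (pmd_t){ pte.val }; }
static pte_t __pmd_to_pte(pmd_t pmd) { return (pte_t){ pmd.val }; }

/* Store path, mirroring the simplified kernel function: convert the
 * pte, tag it as large, write it out as a segment entry. */
static void set_huge_pte_at(pte_t *ptep, pte_t pte)
{
        pmd_t pmd = __pte_to_pmd(pte);

        pmd.val |= _SEGMENT_ENTRY_LARGE;
        *(pmd_t *) ptep = pmd;
}

/* Load path: read the segment entry and convert it straight back. */
static pte_t huge_ptep_get(pte_t *ptep)
{
        pmd_t pmd = *(pmd_t *) ptep;

        return __pmd_to_pte(pmd);
}

int main(void)
{
        pte_t slot = { 0 };

        set_huge_pte_at(&slot, (pte_t){ 0x100000UL });
        /* With identity conversions the round trip only adds the large bit. */
        assert(huge_ptep_get(&slot).val == (0x100000UL | _SEGMENT_ENTRY_LARGE));
        printf("round trip ok: %#lx\n", huge_ptep_get(&slot).val);
        return 0;
}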