Commit 03566562 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: replace hugetlb_cache by PGT_CACHE(PTE_T_ORDER)

Instead of open-coding cache handling for the special case of
hugepage tables having a single pte_t element, this patch makes
use of the common pgtable_cache helpers.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 129dd323
...@@ -5,8 +5,6 @@ ...@@ -5,8 +5,6 @@
#ifdef CONFIG_HUGETLB_PAGE #ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h> #include <asm/page.h>
extern struct kmem_cache *hugepte_cache;
#ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h> #include <asm/book3s/64/hugetlb.h>
......
...@@ -42,6 +42,8 @@ EXPORT_SYMBOL(HPAGE_SHIFT); ...@@ -42,6 +42,8 @@ EXPORT_SYMBOL(HPAGE_SHIFT);
#define hugepd_none(hpd) (hpd_val(hpd) == 0) #define hugepd_none(hpd) (hpd_val(hpd) == 0)
#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{ {
/* /*
...@@ -61,7 +63,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, ...@@ -61,7 +63,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
int num_hugepd; int num_hugepd;
if (pshift >= pdshift) { if (pshift >= pdshift) {
cachep = hugepte_cache; cachep = PGT_CACHE(PTE_T_ORDER);
num_hugepd = 1 << (pshift - pdshift); num_hugepd = 1 << (pshift - pdshift);
} else { } else {
cachep = PGT_CACHE(pdshift - pshift); cachep = PGT_CACHE(pdshift - pshift);
...@@ -264,7 +266,7 @@ static void hugepd_free_rcu_callback(struct rcu_head *head) ...@@ -264,7 +266,7 @@ static void hugepd_free_rcu_callback(struct rcu_head *head)
unsigned int i; unsigned int i;
for (i = 0; i < batch->index; i++) for (i = 0; i < batch->index; i++)
kmem_cache_free(hugepte_cache, batch->ptes[i]); kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);
free_page((unsigned long)batch); free_page((unsigned long)batch);
} }
...@@ -277,7 +279,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte) ...@@ -277,7 +279,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
if (atomic_read(&tlb->mm->mm_users) < 2 || if (atomic_read(&tlb->mm->mm_users) < 2 ||
mm_is_thread_local(tlb->mm)) { mm_is_thread_local(tlb->mm)) {
kmem_cache_free(hugepte_cache, hugepte); kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
put_cpu_var(hugepd_freelist_cur); put_cpu_var(hugepd_freelist_cur);
return; return;
} }
...@@ -652,7 +654,6 @@ static int __init hugepage_setup_sz(char *str) ...@@ -652,7 +654,6 @@ static int __init hugepage_setup_sz(char *str)
} }
__setup("hugepagesz=", hugepage_setup_sz); __setup("hugepagesz=", hugepage_setup_sz);
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void) static int __init hugetlbpage_init(void)
{ {
int psize; int psize;
...@@ -702,21 +703,8 @@ static int __init hugetlbpage_init(void) ...@@ -702,21 +703,8 @@ static int __init hugetlbpage_init(void)
if (pdshift > shift) if (pdshift > shift)
pgtable_cache_add(pdshift - shift, NULL); pgtable_cache_add(pdshift - shift, NULL);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
else if (!hugepte_cache) { else
/* pgtable_cache_add(PTE_T_ORDER, NULL);
* Create a kmem cache for hugeptes. The bottom bits in
* the pte have size information encoded in them, so
* align them to allow this
*/
hugepte_cache = kmem_cache_create("hugepte-cache",
sizeof(pte_t),
HUGEPD_SHIFT_MASK + 1,
0, NULL);
if (hugepte_cache == NULL)
panic("%s: Unable to create kmem cache "
"for hugeptes\n", __func__);
}
#endif #endif
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment