Commit a95d133c authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: Move pte_fragment_alloc() to a common location

In preparation for the next patch, which generalises the use of
pte_fragment_alloc() to all subarches, this patch moves the related
functions to a place that is common to all subarches.

The 8xx will need this to support 16k pages, as in that mode
page tables still have a size of 4k.

Since a pte_fragment with only one fragment is no different
from what is done in the general case, we can easily migrate all
subarches to pte fragments.
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent ddfc20a3
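For context, here is a rough sketch of how the fragment constants fit together; the definitions below are illustrative assumptions, not the actual powerpc headers. A page of PAGE_SIZE is carved into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes each, so a platform with 4k page tables and 16k pages (the 8xx case mentioned above) gets 4 fragments per page, while a platform whose page tables fill a whole page gets a single fragment and behaves exactly like the general case.

/* Illustrative sketch only -- assumed definitions, not the real headers */
#define PTE_FRAG_SIZE_SHIFT	12	/* assumed: 4k page tables */
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)
#define PTE_FRAG_NR		(PAGE_SIZE >> PTE_FRAG_SIZE_SHIFT)	/* 4 with 16k pages, 1 with 4k */

pte_frag_destroy() below relies on this layout: the low bits of a fragment pointer encode which fragment of the page it is, which is where its count computation comes from.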
@@ -50,6 +50,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif
void pte_frag_destroy(void *pte_frag);
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
@@ -15,7 +15,9 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o
obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o
obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o \
				   $(hash64-y) mmu_context_book3s64.o \
				   pgtable-book3s64.o pgtable-frag.o
obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
obj-$(CONFIG_PPC_BOOK3S_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
obj-$(CONFIG_PPC_BOOK3S) += tlb_hash$(BITS).o
@@ -164,21 +164,6 @@ static void destroy_contexts(mm_context_t *ctx)
}
}

static void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

static void pmd_frag_destroy(void *pmd_frag)
{
	int count;
@@ -322,91 +322,6 @@ void pmd_fragment_free(unsigned long *pmd)
}
}
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pgtable_page set, we return
	 * the allocated page with a single fragment
	 * count.
	 */
	if (likely(!mm->context.pte_frag)) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_page_dtor(page);
		__free_page(page);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
// SPDX-License-Identifier: GPL-2.0

/*
 * Handling Page Tables through page fragments
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pgtable_page set, we return
	 * the allocated page with a single fragment
	 * count.
	 */
	if (likely(!mm->context.pte_frag)) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_page_dtor(page);
		__free_page(page);
	}
}
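For reference, a hedged sketch of the kind of inline wrappers a subarch's pgalloc.h can provide around these helpers; the wrappers below are an assumption about typical usage, not part of this patch, and the exact prototypes in the tree may differ. They assume pgtable_t is a pointer to the PTE fragment, as on book3s64.

/* Assumed wrappers, shown for illustration only */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	/* kernel == 1: no ctor/dtor, no __GFP_ACCOUNT accounting */
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	/* kernel == 0: user page table, constructed and accounted */
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

The kernel flag simply selects whether the backing page gets the pgtable_page_ctor()/pgtable_page_dtor() treatment and __GFP_ACCOUNT accounting, as seen in __alloc_for_ptecache() and pte_fragment_free() above.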