Commit 0186f47e authored by Kumar Gala, committed by Paul Mackerras

powerpc: Use RCU based pte freeing mechanism for all powerpc

Refactor the RCU-based pte freeing code that was used on ppc64 so that it
is now used on all powerpc.

Additionally, refactor pte_free() & pte_free_kernel() into common code
shared between ppc32 & ppc64.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent df3b8611
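
Before the diff itself, a note on the mechanism being consolidated: page
tables freed while other CPUs may still be traversing them lock-free are
queued in a batch and only released after an RCU grace period. The condensed
sketch below shows that pattern; the names and the fixed batch size are
illustrative, not the kernel's (the real code uses a page-sized
struct pte_freelist_batch, shown in full in arch/powerpc/mm/pgtable.c below).

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative stand-in for struct pte_freelist_batch. */
struct table_batch {
	struct rcu_head rcu;
	unsigned int index;
	void *tables[32];		/* fixed size, for simplicity */
};

/* RCU callback: runs only after every CPU has passed a quiescent state,
 * so no lockless page-table walker can still hold a pointer into these
 * tables when they are finally freed. */
static void table_batch_free(struct rcu_head *head)
{
	struct table_batch *b = container_of(head, struct table_batch, rcu);
	unsigned int i;

	for (i = 0; i < b->index; i++)
		free_page((unsigned long)b->tables[i]);
	kfree(b);			/* the batch itself */
}

/* Queue one table; when the batch fills up, hand it to RCU. */
static void defer_table_free(struct table_batch **batchp, void *table)
{
	(*batchp)->tables[(*batchp)->index++] = table;
	if ((*batchp)->index == ARRAY_SIZE((*batchp)->tables)) {
		call_rcu(&(*batchp)->rcu, table_batch_free);
		*batchp = NULL;		/* caller starts a new batch */
	}
}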
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,6 +3,8 @@
 
 #include <linux/threads.h>
 
+#define PTE_NONCACHE_NUM	0  /* dummy for now to share code w/ppc64 */
+
 extern void __bad_pte(pmd_t *pmd);
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
 extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, pgtable_t pte);
 
-#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+
+	free_page((unsigned long)p);
+}
 
 #define check_pgt_cache()	do { } while (0)
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -7,7 +7,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
@@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	return page;
 }
 
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
-
-#define PGF_CACHENUM_MASK	0x7
-
-typedef struct pgtable_free {
-	unsigned long val;
-} pgtable_free_t;
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
-						unsigned long mask)
-{
-	BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
-	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
 static inline void pgtable_free(pgtable_free_t pgf)
 {
 	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
@@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf)
 		kmem_cache_free(pgtable_cache[cachenum], p);
 }
 
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
-#define __pte_free_tlb(tlb,ptepage)	\
-do { \
-	pgtable_page_dtor(ptepage); \
-	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
-} while (0)
 
 #define __pmd_free_tlb(tlb, pmd)	\
 	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
 		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -2,11 +2,52 @@
 #define _ASM_POWERPC_PGALLOC_H
 #ifdef __KERNEL__
 
+#include <linux/mm.h>
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+	pgtable_page_dtor(ptepage);
+	__free_page(ptepage);
+}
+
+typedef struct pgtable_free {
+	unsigned long val;
+} pgtable_free_t;
+
+#define PGF_CACHENUM_MASK	0x7
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+						unsigned long mask)
+{
+	BUG_ON(cachenum > PGF_CACHENUM_MASK);
+
+	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
+
 #ifdef CONFIG_PPC64
 #include <asm/pgalloc-64.h>
 #else
 #include <asm/pgalloc-32.h>
 #endif
 
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#ifdef CONFIG_SMP
+#define __pte_free_tlb(tlb,ptepage)	\
+do { \
+	pgtable_page_dtor(ptepage); \
+	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+} while (0)
+#else
+#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, (pte))
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGALLOC_H */
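
A note on the pgtable_free_t token added above: page tables are size-aligned,
so the low bits of their address are zero, and pgtable_free_cache() packs a
small cache index into them. A stand-alone, user-space illustration of the
same packing follows; the addresses and values are hypothetical.

#include <assert.h>
#include <stdint.h>

#define CACHENUM_MASK 0x7UL	/* low three bits, as PGF_CACHENUM_MASK */

/* Pack a cache index into the low bits of a size-aligned pointer. */
static uintptr_t pack(void *p, unsigned int cachenum, uintptr_t size_mask)
{
	assert(cachenum <= CACHENUM_MASK);
	return ((uintptr_t)p & ~size_mask) | cachenum;
}

int main(void)
{
	void *table = (void *)0x10000000;	/* pretend 4096-byte-aligned table */
	uintptr_t val = pack(table, 3, 4096 - 1);

	/* Unpacking recovers both the pointer and the cache index. */
	assert((void *)(val & ~CACHENUM_MASK) == table);
	assert((val & CACHENUM_MASK) == 3);
	return 0;
}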
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,7 +6,7 @@ ifeq ($(CONFIG_PPC64),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
 endif
 
-obj-y				:= fault.o mem.o \
+obj-y				:= fault.o mem.o pgtable.o \
 				   init_$(CONFIG_WORD_SIZE).o \
 				   pgtable_$(CONFIG_WORD_SIZE).o \
 				   mmu_context_$(CONFIG_WORD_SIZE).o
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -35,36 +35,6 @@ mmu_hash_lock:
 	.space	4
 #endif /* CONFIG_SMP */
 
-/*
- * Sync CPUs with hash_page taking & releasing the hash
- * table lock
- */
-#ifdef CONFIG_SMP
-	.text
-_GLOBAL(hash_page_sync)
-	mfmsr	r10
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
-	mtmsr	r0
-	lis	r8,mmu_hash_lock@h
-	ori	r8,r8,mmu_hash_lock@l
-	lis	r0,0x0fff
-	b	10f
-11:	lwz	r6,0(r8)
-	cmpwi	0,r6,0
-	bne	11b
-10:	lwarx	r6,0,r8
-	cmpwi	0,r6,0
-	bne-	11b
-	stwcx.	r0,0,r8
-	bne-	10b
-	isync
-	eieio
-	li	r0,0
-	stw	r0,0(r8)
-	mtmsr	r10
-	blr
-#endif /* CONFIG_SMP */
-
 /*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
--- /dev/null
+++ b/arch/powerpc/mm/pgtable.c
@@ -0,0 +1,118 @@
+/*
+ * This file contains common routines for dealing with free of page tables
+ *
+ *  Derived from arch/powerpc/mm/tlb_64.c:
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+static unsigned long pte_freelist_forced_free;
+
+struct pte_freelist_batch
+{
+	struct rcu_head	rcu;
+	unsigned int	index;
+	pgtable_free_t	tables[0];
+};
+
+#define PTE_FREELIST_SIZE \
+	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+	  / sizeof(pgtable_free_t))
+
+static void pte_free_smp_sync(void *arg)
+{
+	/* Do nothing, just ensure we sync with all CPUs */
+}
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+	pte_freelist_forced_free++;
+
+	smp_call_function(pte_free_smp_sync, NULL, 1);
+
+	pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+	struct pte_freelist_batch *batch =
+		container_of(head, struct pte_freelist_batch, rcu);
+	unsigned int i;
+
+	for (i = 0; i < batch->index; i++)
+		pgtable_free(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+	INIT_RCU_HEAD(&batch->rcu);
+	call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+	/* This is safe since tlb_gather_mmu has disabled preemption */
+	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+		pgtable_free(pgf);
+		return;
+	}
+
+	if (*batchp == NULL) {
+		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+		if (*batchp == NULL) {
+			pgtable_free_now(pgf);
+			return;
+		}
+		(*batchp)->index = 0;
+	}
+	(*batchp)->tables[(*batchp)->index++] = pgf;
+	if ((*batchp)->index == PTE_FREELIST_SIZE) {
+		pte_free_submit(*batchp);
+		*batchp = NULL;
+	}
+}
+
+void pte_free_finish(void)
+{
+	/* This is safe since tlb_gather_mmu has disabled preemption */
+	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+	if (*batchp == NULL)
+		return;
+
+	pte_free_submit(*batchp);
+	*batchp = NULL;
+}
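
pgtable_free_tlb() above picks one of three paths, so only an mm that is
genuinely shared across CPUs pays the RCU cost. A minimal model of that
policy follows; the predicate names are hypothetical stand-ins for the
mm_users and cpu_vm_mask checks, not kernel APIs.

#include <stdbool.h>

enum free_path {
	FREE_NOW,		/* no other CPU can be walking this mm */
	FREE_RCU_BATCH,		/* queue; call_rcu() when the batch fills */
	FREE_FORCED_SYNC	/* OOM fallback: IPI-sync all CPUs, then free */
};

static enum free_path choose_path(int mm_users, bool mm_ran_only_here,
				  bool batch_page_available)
{
	if (mm_users < 2 || mm_ran_only_here)
		return FREE_NOW;
	if (!batch_page_available)
		return FREE_FORCED_SYNC;
	return FREE_RCU_BATCH;
}

int main(void)
{
	/* Single-threaded mm: safe to free the page table immediately. */
	return choose_path(1, false, true) == FREE_NOW ? 0 : 1;
}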
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
 extern char etext[], _stext[];
 
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-extern void hash_page_sync(void);
-#endif
-
 #ifdef HAVE_BATS
 extern phys_addr_t v_mapped_by_bats(unsigned long va);
 extern unsigned long p_mapped_by_bats(phys_addr_t pa);
@@ -125,23 +121,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return ptepage;
 }
 
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-	hash_page_sync();
-#endif
-	free_page((unsigned long)pte);
-}
-
-void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_32)
-	hash_page_sync();
-#endif
-	pgtable_page_dtor(ptepage);
-	__free_page(ptepage);
-}
-
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * arch/powerpc/include/asm/tlb.h file -- tgall
  */
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
-	struct rcu_head	rcu;
-	unsigned int	index;
-	pgtable_free_t	tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
-	  / sizeof(pgtable_free_t))
-
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(pgtable_free_t pgf)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 1);
-
-	pgtable_free(pgf);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++)
-		pgtable_free(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-		pgtable_free(pgf);
-		return;
-	}
-
-	if (*batchp == NULL) {
-		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
-		if (*batchp == NULL) {
-			pgtable_free_now(pgf);
-			return;
-		}
-		(*batchp)->index = 0;
-	}
-	(*batchp)->tables[(*batchp)->index++] = pgf;
-	if ((*batchp)->index == PTE_FREELIST_SIZE) {
-		pte_free_submit(*batchp);
-		*batchp = NULL;
-	}
-}
 
 /*
  * A linux PTE was changed and the corresponding hash table entry
@@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	batch->index = 0;
 }
 
-void pte_free_finish(void)
-{
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
-	if (*batchp == NULL)
-		return;
-
-	pte_free_submit(*batchp);
-	*batchp = NULL;
-}
 
 /**
  * __flush_hash_table_range - Flush all HPTEs for a given address range
  *                            from the hash table (and the TLB). But keeps