Commit c479ed68 authored by Paul Mackerras

PPC32: Simple mmu_gather implementation for now.

This could be made more efficient by batching the hashtable
flushes, but that can be done later.
parent 354f4492
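
[Annotation on the batching idea the message alludes to: one plausible shape is for the gather to queue (virtual address, PTE physical address) pairs and push them to the hash flush in one pass when the gather drains. The sketch below is hypothetical and not part of this commit — the struct, the batch size, and batch_hash_flush are invented names, and it assumes the fourth argument of flush_hash_pages() is a page count, as the per-page call below suggests.]

#define PTE_FLUSH_BATCH	16	/* hypothetical batch size */

struct pte_flush_batch {
	int		nr;			  /* entries queued so far */
	unsigned long	va[PTE_FLUSH_BATCH];	  /* user virtual addresses */
	unsigned long	ptephys[PTE_FLUSH_BATCH]; /* physical addrs of the PTEs */
};

/* Queue one entry instead of flushing immediately (cf. flush_hash_entry). */
static void batch_hash_flush(struct mm_struct *mm, struct pte_flush_batch *b,
			     pte_t *ptep, unsigned long addr)
{
	b->va[b->nr] = addr;
	b->ptephys[b->nr] = __pa(ptep) & PAGE_MASK;
	if (++b->nr < PTE_FLUSH_BATCH)
		return;
	/*
	 * Drain the batch.  A smarter version would coalesce runs of
	 * contiguous pages into a single flush_hash_pages() call via its
	 * count argument; leftover entries would be drained from
	 * tlb_flush() at the end of the gather.
	 */
	while (b->nr > 0) {
		b->nr--;
		flush_hash_pages(mm->context, b->va[b->nr],
				 b->ptephys[b->nr], 1);
	}
}

[Whether this wins depends on how much of the cost is per-call overhead versus per-page hash-search work, which is presumably why the commit defers it.]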
@@ -31,13 +31,42 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <asm/tlbflush.h>
+#include <asm/tlb.h>
 #include "mmu_decl.h"
 
+/*
+ * Called when unmapping pages to flush entries from the TLB/hash table.
+ */
+void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
+{
+	unsigned long ptephys;
+
+	if (Hash != 0) {
+		ptephys = __pa(ptep) & PAGE_MASK;
+		flush_hash_pages(mm->context, addr, ptephys, 1);
+	}
+}
+
+/*
+ * Called at the end of a mmu_gather operation to make sure the
+ * TLB flush is completely done.
+ */
+void tlb_flush(mmu_gather_t *tlb)
+{
+	if (Hash == 0) {
+		/*
+		 * 603 needs to flush the whole TLB here since
+		 * it doesn't use a hash table.
+		 */
+		_tlbia();
+	}
+}
+
 /*
  * TLB flushing:
  *
  *  - flush_tlb_all() flushes all processes' TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
@@ -91,29 +120,6 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 	}
 }
 
-/*
- * Flush all tlb/hash table entries (except perhaps for those
- * mapping RAM starting at PAGE_OFFSET, since they never change).
- */
-void
-flush_tlb_all(void)
-{
-	/*
-	 * Just flush the kernel part of the address space, that's
-	 * all that the current callers of this require.
-	 * Eventually I hope to persuade the powers that be that
-	 * we can and should dispense with flush_tlb_all().
-	 *  -- paulus.
-	 *
-	 * In fact this should never get called now that we
-	 * have flush_tlb_kernel_range.  -- paulus
-	 */
-	printk(KERN_ERR "flush_tlb_all called from %p\n",
-	       __builtin_return_address(0));
-	flush_range(&init_mm, TASK_SIZE, ~0UL);
-	FINISH_FLUSH;
-}
-
 /*
  * Flush kernel TLB entries in the given range
  */
@@ -124,24 +130,19 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 }
 
 /*
- * Flush all the (user) entries for the address space described
- * by mm. We can't rely on mm->mmap describing all the entries
- * that might be in the hash table.
+ * Flush all the (user) entries for the address space described by mm.
  */
 void flush_tlb_mm(struct mm_struct *mm)
 {
+	struct vm_area_struct *mp;
+
 	if (Hash == 0) {
 		_tlbia();
 		return;
 	}
-	if (mm->map_count) {
-		struct vm_area_struct *mp;
-		for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
-			flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
-	} else {
-		flush_range(mm, 0, TASK_SIZE);
-	}
+	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
+		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 	FINISH_FLUSH;
 }
@@ -161,7 +162,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	FINISH_FLUSH;
 }
 
 /*
  * For each address in the range, find the pte for the address
  * and check _PAGE_HASHPTE bit; if it is set, find and destroy
......
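
[For orientation between the two files: the hooks defined above and in the header below are driven by the generic mmu_gather machinery. The sketch paraphrases the asm-generic/tlb.h call order of this era rather than quoting it; zap_range_sketch is an invented name and the exact tlb_gather_mmu() signature may differ.]

/* Paraphrased caller-side flow (illustrative; not part of this commit). */
static void zap_range_sketch(struct mm_struct *mm, pte_t *ptep,
			     unsigned long start, unsigned long end)
{
	mmu_gather_t *tlb = tlb_gather_mmu(mm);	/* begin gathering */
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
		/*
		 * Per-PTE hook: on hash-table PPC this ends up in
		 * flush_hash_entry() above when _PAGE_HASHPTE is set.
		 */
		tlb_remove_tlb_entry(tlb, ptep, addr);
	}
	/* tlb_finish_mmu() eventually invokes the tlb_flush() above. */
	tlb_finish_mmu(tlb, start, end);
}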
/*
 * BK Id: SCCS/s.tlb.h 1.5 05/17/01 18:14:26 cort
 * TLB shootdown specifics for PPC
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _PPC_TLB_H
#define _PPC_TLB_H

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifdef CONFIG_PPC_STD_MMU
/* Classic PPC with hash-table based MMU... */

struct free_pte_ctx;
extern void tlb_flush(struct free_pte_ctx *tlb);

/* Get the generic bits... */
#include <asm-generic/tlb.h>

/* Nothing needed here in fact... */
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)

extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

static inline void tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
					unsigned long address)
{
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(tlb->mm, ptep, address);
}

#else
/* Embedded PPC with software-loaded TLB, very simple... */

struct flush_tlb_arch { };

#define tlb_init_arch(tlb, full_flush)	do { } while (0)
#define tlb_finish_arch(tlb)		do { } while (0)
#define tlb_start_vma(tlb, vma)		do { } while (0)
#define tlb_end_vma(tlb, vma)		do { } while (0)
#define tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)			flush_tlb_mm((tlb)->mm)

/* Get the generic bits... */
#include <asm-generic/tlb.h>

#endif /* CONFIG_PPC_STD_MMU */
#endif /* _PPC_TLB_H */
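
[On the software-loaded-TLB branch above, the whole gather thus collapses into a single mm-wide flush at the end. Written out as a function for clarity — the name is invented; this is just the macro expanded.]

/* What the embedded-PPC tlb_flush(tlb) macro above amounts to. */
static inline void tlb_flush_sketch(mmu_gather_t *tlb)
{
	flush_tlb_mm(tlb->mm);	/* a full _tlbia()/__tlbia() on 4xx/8xx, per tlbflush.h below */
}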
@@ -22,8 +22,6 @@ extern void _tlbia(void);
 
 #if defined(CONFIG_4xx)
 
-static inline void flush_tlb_all(void)
-	{ _tlbia(); }
 static inline void flush_tlb_mm(struct mm_struct *mm)
 	{ _tlbia(); }
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -40,8 +38,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 
 #elif defined(CONFIG_8xx)
 #define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
 
-static inline void flush_tlb_all(void)
-	{ __tlbia(); }
 static inline void flush_tlb_mm(struct mm_struct *mm)
 	{ __tlbia(); }
 static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -58,7 +54,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 
 #else	/* 6xx, 7xx, 7xxx cpus */
 
 struct mm_struct;
 struct vm_area_struct;
 
-extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
......