Commit 513bf064 authored by Anton Blanchard

ppc64: create cacheflush.h and tlbflush.h

ppc64: remove local_flush_* functions, just define them directly
parent 76c48a25
@@ -44,6 +44,7 @@
 #include <asm/machdep.h>
 #include <asm/lmb.h>
 #include <asm/abs_addr.h>
+#include <asm/tlbflush.h>
 #ifdef CONFIG_PPC_EEH
 #include <asm/eeh.h>
 #endif
......
@@ -19,6 +19,7 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 /*
  * Create a pte. Used during initialization only.
......
@@ -32,6 +32,7 @@
 #include <asm/pci_dma.h>
 #include <linux/pci.h>
 #include <asm/Naca.h>
+#include <asm/tlbflush.h>
 
 /* Status return values */
 #define H_Success	0
......
@@ -34,9 +34,6 @@
 #include <asm/bitops.h>
 #include <asm/checksum.h>
 #include <asm/pgtable.h>
-#include <linux/adb.h>
-#include <linux/cuda.h>
-#include <linux/pmu.h>
 #include <asm/prom.h>
 #include <asm/system.h>
 #include <asm/pci-bridge.h>
@@ -45,9 +42,8 @@
 #include <asm/machdep.h>
 #include <asm/hw_irq.h>
 #include <asm/abs_addr.h>
-#ifdef CONFIG_SMP
-#include <asm/smplock.h>
-#endif /* CONFIG_SMP */
+#include <asm/cacheflush.h>
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iSeries/iSeries_pci.h>
 #include <asm/iSeries/iSeries_proc.h>
......
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/ppcdebug.h>
 #include <asm/unistd.h>
+#include <asm/cacheflush.h>
 
 #define DEBUG_SIG 0
......
@@ -262,18 +262,27 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 }
 
 void
-local_flush_tlb_mm(struct mm_struct *mm)
+flush_tlb_all(void)
+{
+	/* Implemented to just flush the vmalloc area.
+	 * vmalloc is the only user of flush_tlb_all.
+	 */
+	__flush_tlb_range(NULL, VMALLOC_START, VMALLOC_END);
+}
+
+void
+flush_tlb_mm(struct mm_struct *mm)
 {
 	if (mm->map_count) {
 		struct vm_area_struct *mp;
 		for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
-			local_flush_tlb_range(mm, mp->vm_start, mp->vm_end);
+			__flush_tlb_range(mm, mp->vm_start, mp->vm_end);
 	} else {
 		/* MIKEC: It is not clear why this is needed */
 		/* paulus: it is needed to clear out stale HPTEs
 		 * when an address space (represented by an mm_struct)
 		 * is being destroyed. */
-		local_flush_tlb_range(mm, USER_START, USER_END);
+		__flush_tlb_range(mm, USER_START, USER_END);
 	}
 
 	/* XXX are there races with checking cpu_vm_mask? - Anton */
@@ -284,7 +293,7 @@ local_flush_tlb_mm(struct mm_struct *mm)
  * Callers should hold the mm->page_table_lock
  */
 void
-local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	unsigned long context = 0;
 	pgd_t *pgd;
@@ -310,7 +319,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 		break;
 	default:
-		panic("local_flush_tlb_page: invalid region 0x%016lx", vmaddr);
+		panic("flush_tlb_page: invalid region 0x%016lx", vmaddr);
 	}
@@ -330,7 +339,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
 
 void
-local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+__flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
......
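For illustration, a minimal sketch of how a caller exercises the renamed entry points after this change; demo_flush_vma below is hypothetical and not part of this commit.

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical caller: the generic names are now real functions,
 * not #define aliases for local_flush_tlb_*. */
static void demo_flush_vma(struct vm_area_struct *vma)
{
	/* One page, e.g. after a single PTE changed. */
	flush_tlb_page(vma, vma->vm_start);

	/* The whole address space, e.g. when it is being destroyed
	 * (clearing stale HPTEs, per the paulus comment above). */
	flush_tlb_mm(vma->vm_mm);

	/* Everything; per the comment in flush_tlb_all, this only
	 * needs to cover the vmalloc area, its sole user. */
	flush_tlb_all();
}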
#ifndef _PPC64_CACHEFLUSH_H
#define _PPC64_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)			do { } while (0)

extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void __flush_dcache_icache(void *page_va);

#endif /* _PPC64_CACHEFLUSH_H */
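Although mapping changes need no cache flush, the D-cache and I-cache still have to be synchronized whenever instructions are stored, which is what the extern functions above provide. A minimal usage sketch with a hypothetical helper (not from this commit):

#include <asm/cacheflush.h>

/* Hypothetical helper: store one instruction, then flush the D-cache
 * and invalidate the I-cache for that range so the CPU can fetch it. */
static void demo_patch_insn(unsigned int *ip, unsigned int insn)
{
	*ip = insn;
	flush_icache_range((unsigned long)ip,
			   (unsigned long)ip + sizeof(insn));
}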
@@ -339,43 +339,6 @@ static inline void pte_clear(pte_t * ptep)
 	pte_update(ptep, ~_PAGE_HPTEFLAGS, 0);
 }
 
-struct mm_struct;
-struct vm_area_struct;
-
-extern void local_flush_tlb_all(void);
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void local_flush_tlb_range(struct mm_struct *mm,
-				  unsigned long start, unsigned long end);
-
-#define flush_tlb_all		local_flush_tlb_all
-#define flush_tlb_mm		local_flush_tlb_mm
-#define flush_tlb_page		local_flush_tlb_page
-#define flush_tlb_range(vma, start, end) local_flush_tlb_range(vma->vm_mm, start, end)
-
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* PPC has hw page tables. */
-}
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.
- */
-#define flush_cache_all()		do { } while (0)
-#define flush_cache_mm(mm)		do { } while (0)
-#define flush_cache_range(vma, a, b)	do { } while (0)
-#define flush_cache_page(vma, p)	do { } while (0)
-#define flush_page_to_ram(page)		do { } while (0)
-
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-			struct page *page, unsigned long addr, int len);
-extern void flush_icache_range(unsigned long, unsigned long);
-extern void __flush_dcache_icache(void *page_va);
-extern void flush_dcache_page(struct page *page);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
 extern unsigned long ioremap_bot, ioremap_base;
@@ -419,19 +382,6 @@ extern void paging_init(void);
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 #endif
 
-extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
-extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
-			    int local);
-void flush_hash_range(unsigned long context, unsigned long number, int local);
-
-/* TLB flush batching */
-#define MAX_BATCH_FLUSH 128
-struct tlb_batch_data {
-	pte_t pte;
-	unsigned long addr;
-};
-extern struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
-
 /* Encode and de-code a swap entry */
 #define SWP_TYPE(entry)		(((entry).val >> 1) & 0x3f)
 #define SWP_OFFSET(entry)	((entry).val >> 8)
......
#ifndef _PPC64_TLBFLUSH_H
#define _PPC64_TLBFLUSH_H

#include <linux/threads.h>
#include <linux/mm.h>
#include <asm/page.h>

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void __flush_tlb_range(struct mm_struct *mm,
			      unsigned long start, unsigned long end);
#define flush_tlb_range(vma, start, end) \
	__flush_tlb_range(vma->vm_mm, start, end)

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* PPC has hw page tables. */
}

extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
			    int local);
void flush_hash_range(unsigned long context, unsigned long number, int local);

/* TLB flush batching */
#define MAX_BATCH_FLUSH 128
struct tlb_batch_data {
	pte_t pte;
	unsigned long addr;
};
extern struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];

#endif /* _PPC64_TLBFLUSH_H */
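A final usage sketch under stated assumptions: generic MM code updates PTEs (holding mm->page_table_lock, as the comment above flush_tlb_page notes) and then drops the stale translations through this header's interface; demo_change_range is hypothetical and not part of the commit.

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical mprotect-style path for [start, end) in vma. */
static void demo_change_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/* ... PTE updates for the range happen here ... */

	/* flush_tlb_range is the macro above, expanding to
	 * __flush_tlb_range(vma->vm_mm, start, end). */
	flush_tlb_range(vma, start, end);
}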