Commit 1970282f authored Nov 04, 2005 by Stephen Rothwell
powerpc: merge tlbflush.h
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>

parent 9a0f78f6
Showing 2 changed files with 146 additions and 52 deletions:

  include/asm-powerpc/tlbflush.h   +146   -0
  include/asm-ppc64/tlbflush.h       +0  -52
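Both files below implement one idea: the merge replaces two architecture-specific headers with a single header that picks an implementation at compile time via CONFIG_PPC64. A minimal standalone sketch of that dispatch pattern follows; the function and messages are illustrative only, and in the real build CONFIG_PPC64 comes from Kconfig rather than a -D flag:

/*
 * Sketch of compile-time dispatch on a config macro, as the merged
 * header does with CONFIG_PPC64. Build both ways to see each path:
 *   cc demo.c && ./a.out
 *   cc -DCONFIG_PPC64 demo.c && ./a.out
 */
#include <stdio.h>

static void flush_tlb_demo(void)
{
#ifdef CONFIG_PPC64
	/* 64-bit path: queue invalidations, flush lazily in a batch */
	puts("ppc64: batch and flush via flush_tlb_pending()");
#else
	/* 32-bit path: issue tlbie/tlbia-style flushes directly */
	puts("ppc32: direct _tlbie()/_tlbia()");
#endif
}

int main(void)
{
	flush_tlb_demo();
	return 0;
}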
include/asm-ppc/tlbflush.h → include/asm-powerpc/tlbflush.h
+#ifndef _ASM_POWERPC_TLBFLUSH_H
+#define _ASM_POWERPC_TLBFLUSH_H
 /*
- * include/asm-ppc/tlbflush.h
  *  TLB flushing:
  *
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
  *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  *
  *  This program is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU General Public License
...
@@ -7,87 +16,120 @@
  *  2 of the License, or (at your option) any later version.
  */
 #ifdef __KERNEL__
-#ifndef _PPC_TLBFLUSH_H
-#define _PPC_TLBFLUSH_H
 
 #include <linux/config.h>
 
+struct mm_struct;
+
+#ifdef CONFIG_PPC64
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+	unsigned long index;
+	struct mm_struct *mm;
+	pte_t pte[PPC64_TLB_BATCH_NR];
+	unsigned long vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int large;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+static inline void flush_tlb_pending(void)
+{
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+
+	if (batch->index)
+		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
+}
+
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
+
+#else /* CONFIG_PPC64 */
+
 #include <linux/mm.h>
 
 extern void _tlbie(unsigned long address);
 extern void _tlbia(void);
-#if defined(CONFIG_4xx)
+/*
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
-#ifndef CONFIG_44x
-#define __tlbia()	asm volatile ("sync; tlbia; isync" : : : "memory")
-#else
-#define __tlbia		_tlbia
+#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
+#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
+#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
+#define flush_tlb_pending()	_tlbia()
 #endif
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
-#elif defined(CONFIG_FSL_BOOKE)
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-/* TODO: determine if flush_tlb_range & flush_tlb_kernel_range
- * are best implemented as tlbia vs specific tlbie's */
+
+#endif /* CONFIG_PPC64 */
-#define __tlbia()	_tlbia()
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
+	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
-
-#elif defined(CONFIG_8xx)
-#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-					 unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	flush_tlb_pending();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				unsigned long vmaddr)
+{
+#ifdef CONFIG_PPC64
+	flush_tlb_pending();
+#else
+	_tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+#ifndef CONFIG_PPC64
+	_tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	flush_tlb_pending();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_pending();
+}
 
 #else	/* 6xx, 7xx, 7xxx cpus */
 
 struct mm_struct;
 struct vm_area_struct;
 
 extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void flush_tlb_page_nohash(struct vm_area_struct *vma,
 				  unsigned long addr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #endif
 /*
...
@@ -96,20 +138,9 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  * about our page-table pages.  -- paulus
  */
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
+				unsigned long start, unsigned long end)
 {
 }
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* _PPC_TLBFLUSH_H */
 #endif /*__KERNEL__ */
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
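The heart of the new CONFIG_PPC64 branch above is ppc64_tlb_batch: invalidations accumulate in a fixed-size per-cpu buffer and are handed to __flush_tlb_pending() only when the buffer fills or a synchronization point calls flush_tlb_pending(). Below is a runnable userspace sketch of that batch-and-flush pattern; all names are hypothetical, and the per-cpu/preemption handling done by get_cpu_var()/put_cpu_var() in the kernel is deliberately left out:

/*
 * Batch-and-flush: queue items into a fixed buffer, drain when full
 * or at an explicit sync point (mirrors flush_tlb_pending() above).
 */
#include <stdio.h>

#define BATCH_NR 4			/* stand-in for PPC64_TLB_BATCH_NR (192) */

struct tlb_batch {
	unsigned long index;		/* entries currently queued */
	unsigned long vaddr[BATCH_NR];	/* queued virtual addresses */
};

/* stand-in for __flush_tlb_pending(): drain the whole batch at once */
static void flush_batch(struct tlb_batch *batch)
{
	for (unsigned long i = 0; i < batch->index; i++)
		printf("flush va=0x%lx\n", batch->vaddr[i]);
	batch->index = 0;
}

/* queue one address, draining first if the buffer is full */
static void queue_flush(struct tlb_batch *batch, unsigned long va)
{
	if (batch->index == BATCH_NR)
		flush_batch(batch);
	batch->vaddr[batch->index++] = va;
}

/* stand-in for flush_tlb_pending(): drain only if something is queued */
static void flush_pending(struct tlb_batch *batch)
{
	if (batch->index)
		flush_batch(batch);
}

int main(void)
{
	struct tlb_batch batch = { 0 };

	for (unsigned long va = 0x1000; va <= 0x6000; va += 0x1000)
		queue_flush(&batch, va);	/* drains once at 4 entries */
	flush_pending(&batch);			/* drains the remainder */
	return 0;
}

The payoff of this design is amortization: on hash-table MMUs each flush is expensive, so batching up to PPC64_TLB_BATCH_NR entries lets flush_hash_range() handle them in one pass instead of one trap per page.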
include/asm-ppc64/tlbflush.h (deleted, mode 100644)
-#ifndef _PPC64_TLBFLUSH_H
-#define _PPC64_TLBFLUSH_H
-
-/*
- * TLB flushing:
- *
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- */
-
-#include <linux/percpu.h>
-#include <asm/page.h>
-
-#define PPC64_TLB_BATCH_NR 192
-
-struct mm_struct;
-
-struct ppc64_tlb_batch {
-	unsigned long index;
-	struct mm_struct *mm;
-	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long vaddr[PPC64_TLB_BATCH_NR];
-	unsigned int large;
-};
-DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
-
-extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-
-static inline void flush_tlb_pending(void)
-{
-	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
-
-	if (batch->index)
-		__flush_tlb_pending(batch);
-	put_cpu_var(ppc64_tlb_batch);
-}
-
-#define flush_tlb_mm(mm)			flush_tlb_pending()
-#define flush_tlb_page(vma, addr)		flush_tlb_pending()
-#define flush_tlb_page_nohash(vma, addr)	do { } while (0)
-#define flush_tlb_range(vma, start, end) \
-	do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
-#define flush_tlb_pgtables(mm, start, end)	do { } while (0)
-
-extern void flush_hash_page(unsigned long va, pte_t pte, int local);
-void flush_hash_range(unsigned long number, int local);
-
-#endif /* _PPC64_TLBFLUSH_H */
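The deleted ppc64 header mapped most of the flush API onto macros rather than inline functions, leaning on two C idioms: do { } while (0) keeps a multi-statement (or empty) macro behaving as a single statement after if/else, and (void)(start) marks an argument as deliberately unused while still evaluating it. A small runnable sketch with hypothetical names:

/*
 * do-while(0) and (void)-cast macro idioms, as used by the deleted
 * flush_tlb_range()/flush_tlb_page_nohash() macros above.
 */
#include <stdio.h>

static void flush_pending(void)
{
	puts("flush_pending()");
}

/* same shape as flush_tlb_range(vma, start, end) in the deleted file */
#define flush_range(start, end) \
	do { (void)(start); (void)(end); flush_pending(); } while (0)

/* same shape as flush_tlb_page_nohash(): expands to a harmless no-op */
#define flush_nohash(addr)	do { } while (0)

int main(void)
{
	int dirty = 1;

	if (dirty)
		flush_range(0x1000, 0x2000);	/* expands safely under if/else */
	else
		flush_nohash(0x3000);
	return 0;
}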