Commit cdcc9708 authored Nov 09, 2007 by Paul Mundt
sh: Move in the SH-5 mmu_context headers.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 9085fa12
Showing 5 changed files with 173 additions and 271 deletions
include/asm-sh/cpu-sh5/mmu_context.h    +27  -0
include/asm-sh/mmu_context.h            +24  -63
include/asm-sh/mmu_context_32.h         +47  -0
include/asm-sh/mmu_context_64.h         +75  -0
include/asm-sh64/mmu_context.h          +0   -208
include/asm-sh/cpu-sh5/mmu_context.h  0 → 100644 (new file)
#ifndef __ASM_SH_CPU_SH5_MMU_CONTEXT_H
#define __ASM_SH_CPU_SH5_MMU_CONTEXT_H
/* Common defines */
#define TLB_STEP 0x00000010
#define TLB_PTEH 0x00000000
#define TLB_PTEL 0x00000008
/* PTEH defines */
#define PTEH_ASID_SHIFT 2
#define PTEH_VALID 0x0000000000000001
#define PTEH_SHARED 0x0000000000000002
#define PTEH_MATCH_ASID 0x00000000000003ff
#ifndef __ASSEMBLY__
/* This has to be a common function because the next location to fill
 * information is shared. */
extern void __do_tlb_refill(unsigned long address,
                            unsigned long long is_text_not_data, pte_t *pte);

/* Profiling counter. */
#ifdef CONFIG_SH64_PROC_TLB
extern unsigned long long calls_to_do_fast_page_fault;
#endif

#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */
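For orientation, here is a sketch (illustrative only, not part of the patch) of the kind of PTEH value a TLB refill composes from the defines above: the faulting page's virtual page number, the current ASID shifted into the PTEH_ASID_SHIFT position, and the valid bit. The helper name mk_pteh() is hypothetical; MMU_VPN_MASK and get_asid() come from the mmu_context headers touched elsewhere in this commit.

/* Illustration only: composing a PTEH-style value from the defines above. */
static inline unsigned long long mk_pteh(unsigned long address)
{
        unsigned long long pteh = address & MMU_VPN_MASK;  /* page's VPN */

        pteh |= get_asid() << PTEH_ASID_SHIFT;  /* tag with the current ASID */
        return pteh | PTEH_VALID;               /* mark the entry valid */
}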
include/asm-sh/mmu_context.h
/*
 * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 - 2006 Paul Mundt
+ * Copyright (C) 2003 - 2007 Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 */
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <asm/cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
...
@@ -19,7 +19,6 @@
* (a) TLB cache version (or round, cycle whatever expression you like)
* (b) ASID (Address Space IDentifier)
*/
#define MMU_CONTEXT_ASID_MASK 0x000000ff
#define MMU_CONTEXT_VERSION_MASK 0xffffff00
#define MMU_CONTEXT_FIRST_VERSION 0x00000100
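As an aside (not part of the patch): these masks split a single context word into the two fields described above, the TLB version in the upper 24 bits and the ASID in the lower 8. A minimal illustration, with hypothetical helper names:

/* Illustration only: unpacking a context word under the masks above. */
static inline unsigned long ctx_version(unsigned long ctx)
{
        return ctx & MMU_CONTEXT_VERSION_MASK;  /* upper 24 bits: TLB version */
}

static inline unsigned long ctx_asid(unsigned long ctx)
{
        return ctx & MMU_CONTEXT_ASID_MASK;     /* lower 8 bits: ASID */
}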
...
@@ -28,10 +27,11 @@
/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID 0x100

-#define cpu_context(cpu, mm)   ((mm)->context.id[cpu])
-#define cpu_asid(cpu, mm)      (cpu_context((cpu), (mm)) & \
-                                MMU_CONTEXT_ASID_MASK)
#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)
+#define cpu_context(cpu, mm)   ((mm)->context.id[cpu])
+#define cpu_asid(cpu, mm)      \
+       (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)
/*
* Virtual Page Number mask
...
@@ -39,6 +39,12 @@
#define MMU_VPN_MASK 0xfffff000

#ifdef CONFIG_MMU
+#if defined(CONFIG_SUPERH32)
+#include "mmu_context_32.h"
+#else
+#include "mmu_context_64.h"
+#endif
/*
* Get MMU context if needed.
*/
...
@@ -59,6 +65,14 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
                 */
                flush_tlb_all();
+#ifdef CONFIG_SUPERH64
+               /*
+                * The SH-5 cache uses the ASIDs, requiring both the I and D
+                * cache to be flushed when the ASID is exhausted. Weak.
+                */
+               flush_cache_all();
+#endif

                /*
                 * Fix version; Note that we avoid version #0
                 * to distingush NO_CONTEXT.
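For context, a sketch (not patch content) of the exhaustion path this hunk sits in, modelled on the sh64 get_new_mmu_context() removed later in this commit: when bumping the context wraps the low ASID bits to zero, every ASID of the current version has been handed out, so the TLB (and, on SH-5, the ASID-tagged caches) is flushed and the version advances, skipping 0 so it stays distinct from NO_CONTEXT. The bump_context() name is hypothetical.

/* Sketch only: the rollover logic the flush above belongs to. */
static inline unsigned long bump_context(unsigned long ctx)
{
        if (!(++ctx & MMU_CONTEXT_ASID_MASK)) {
                flush_tlb_all();        /* ASIDs of this version exhausted */
#ifdef CONFIG_SUPERH64
                flush_cache_all();      /* SH-5 caches are ASID-tagged */
#endif
                if (!ctx)
                        ctx = MMU_CONTEXT_FIRST_VERSION;
        }
        return ctx;
}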
...
@@ -85,39 +99,6 @@ static inline int init_new_context(struct task_struct *tsk,
        return 0;
}

-/*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-       /* Do nothing */
-}
-
-static inline void set_asid(unsigned long asid)
-{
-       unsigned long __dummy;
-
-       __asm__ __volatile__ ("mov.l %2, %0\n\t"
-                             "and %3, %0\n\t"
-                             "or %1, %0\n\t"
-                             "mov.l %0, %2"
-                             : "=&r" (__dummy)
-                             : "r" (asid), "m" (__m(MMU_PTEH)),
-                               "r" (0xffffff00));
-}
-
-static inline unsigned long get_asid(void)
-{
-       unsigned long asid;
-
-       __asm__ __volatile__ ("mov.l %1, %0"
-                             : "=r" (asid)
-                             : "m" (__m(MMU_PTEH)));
-       asid &= MMU_CONTEXT_ASID_MASK;
-       return asid;
-}
-
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
...
@@ -128,17 +109,6 @@ static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
        set_asid(cpu_asid(cpu, mm));
}

-/* MMU_TTB is used for optimizing the fault handling. */
-static inline void set_TTB(pgd_t *pgd)
-{
-       ctrl_outl((unsigned long)pgd, MMU_TTB);
-}
-
-static inline pgd_t *get_TTB(void)
-{
-       return (pgd_t *)ctrl_inl(MMU_TTB);
-}
-
static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
...
@@ -153,17 +123,7 @@ static inline void switch_mm(struct mm_struct *prev,
        if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
                activate_context(next, cpu);
}

-#define deactivate_mm(tsk,mm)  do { } while (0)
-
-#define activate_mm(prev, next) \
-       switch_mm((prev),(next),NULL)
-
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-#else /* !CONFIG_MMU */
+#else
#define get_mmu_context(mm)            do { } while (0)
#define init_new_context(tsk,mm)       (0)
#define destroy_context(mm)            do { } while (0)
...
@@ -173,10 +133,11 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#define get_TTB()                      (0)
#define activate_context(mm,cpu)       do { } while (0)
#define switch_mm(prev,next,tsk)       do { } while (0)
+#endif /* CONFIG_MMU */
+
+#define activate_mm(prev, next)        switch_mm((prev),(next),NULL)
#define deactivate_mm(tsk,mm)          do { } while (0)
-#define activate_mm(prev,next)         do { } while (0)
#define enter_lazy_tlb(mm,tsk)         do { } while (0)
-#endif /* CONFIG_MMU */

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
...
include/asm-sh/mmu_context_32.h  0 → 100644 (new file)
#ifndef __ASM_SH_MMU_CONTEXT_32_H
#define __ASM_SH_MMU_CONTEXT_32_H

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        /* Do nothing */
}

static inline void set_asid(unsigned long asid)
{
        unsigned long __dummy;

        __asm__ __volatile__ ("mov.l %2, %0\n\t"
                              "and %3, %0\n\t"
                              "or %1, %0\n\t"
                              "mov.l %0, %2"
                              : "=&r" (__dummy)
                              : "r" (asid), "m" (__m(MMU_PTEH)),
                                "r" (0xffffff00));
}

static inline unsigned long get_asid(void)
{
        unsigned long asid;

        __asm__ __volatile__ ("mov.l %1, %0"
                              : "=r" (asid)
                              : "m" (__m(MMU_PTEH)));
        asid &= MMU_CONTEXT_ASID_MASK;
        return asid;
}

/* MMU_TTB is used for optimizing the fault handling. */
static inline void set_TTB(pgd_t *pgd)
{
        ctrl_outl((unsigned long)pgd, MMU_TTB);
}

static inline pgd_t *get_TTB(void)
{
        return (pgd_t *)ctrl_inl(MMU_TTB);
}
#endif /* __ASM_SH_MMU_CONTEXT_32_H */
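As a plain-C restatement (illustrative only, helper name hypothetical): the set_asid() asm above reads MMU_PTEH, clears its low eight bits, ORs in the new ASID and writes the result back, which could equally be expressed with the ctrl_inl()/ctrl_outl() accessors used by set_TTB()/get_TTB():

/* Illustration only: what the set_asid() inline asm above does. */
static inline void set_asid_c(unsigned long asid)
{
        unsigned long pteh = ctrl_inl(MMU_PTEH);        /* read PTEH */

        pteh &= 0xffffff00;             /* drop the old ASID */
        pteh |= asid;                   /* install the new one */
        ctrl_outl(pteh, MMU_PTEH);      /* write it back */
}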
include/asm-sh/mmu_context_64.h  0 → 100644 (new file)
#ifndef __ASM_SH_MMU_CONTEXT_64_H
#define __ASM_SH_MMU_CONTEXT_64_H
/*
* sh64-specific mmu_context interface.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <asm/cpu/registers.h>
#include <asm/cacheflush.h>
#define SR_ASID_MASK 0xffffffffff00ffffULL
#define SR_ASID_SHIFT 16
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
        /* Well, at least free TLB entries */
        flush_tlb_mm(mm);
}
static inline unsigned long get_asid(void)
{
        unsigned long long sr;

        asm volatile ("getcon " __SR ", %0\n\t"
                      : "=r" (sr));

        sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
        return (unsigned long) sr;
}
/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
        unsigned long long sr, pc;

        asm volatile ("getcon " __SR ", %0" : "=r" (sr));

        sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);

        /*
         * It is possible that this function may be inlined and so to avoid
         * the assembler reporting duplicate symbols we make use of the
         * gas trick of generating symbols using numerics and forward
         * reference.
         */
        asm volatile ("movi 1, %1\n\t"
                      "shlli %1, 28, %1\n\t"
                      "or %0, %1, %1\n\t"
                      "putcon %1, " __SR "\n\t"
                      "putcon %0, " __SSR "\n\t"
                      "movi 1f, %1\n\t"
                      "ori %1, 1 , %1\n\t"
                      "putcon %1, " __SPC "\n\t"
                      "rte\n"
                      "1:\n\t"
                      : "=r" (sr), "=r" (pc) : "0" (sr));
}
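A side note on the "gas trick" the comment above refers to (illustration only, not from the patch): GNU as numeric local labels such as "1:" may be defined any number of times, and a reference written "1f" binds to the nearest following definition ("1b" to the nearest preceding one), so every inlined expansion of set_asid() gets its own private label rather than a clashing named symbol. A minimal, ISA-neutral demonstration using only data directives:

/* Illustration only: numeric local labels never collide across expansions. */
static inline void gas_local_label_demo(void)
{
        __asm__ __volatile__ (
                ".pushsection .rodata\n"
                "1: .long 1f - 1b\n"    /* "1f" = next "1:", "1b" = this one */
                "1: .long 0\n"
                ".popsection\n");
}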
/* No spare register to twiddle, so use a software cache */
extern pgd_t *mmu_pdtp_cache;

#define set_TTB(pgd)    (mmu_pdtp_cache = (pgd))
#define get_TTB()       (mmu_pdtp_cache)

#endif /* __ASM_SH_MMU_CONTEXT_64_H */
include/asm-sh64/mmu_context.h  100644 → 0 (deleted)
#ifndef __ASM_SH64_MMU_CONTEXT_H
#define __ASM_SH64_MMU_CONTEXT_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/mmu_context.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
* ASID handling idea taken from MIPS implementation.
*
*/
#ifndef __ASSEMBLY__
/*
* Cache of MMU context last used.
*
* The MMU "context" consists of two things:
* (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
* (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
*/
extern unsigned long mmu_context_cache;

#include <asm/page.h>
#include <asm-generic/mm_hooks.h>

/* Current mm's pgd */
extern pgd_t *mmu_pdtp_cache;
#define SR_ASID_MASK 0xffffffffff00ffffULL
#define SR_ASID_SHIFT 16
#define MMU_CONTEXT_ASID_MASK 0x000000ff
#define MMU_CONTEXT_VERSION_MASK 0xffffff00
#define MMU_CONTEXT_FIRST_VERSION 0x00000100
#define NO_CONTEXT 0
/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID 0x100
/*
* Virtual Page Number mask
*/
#define MMU_VPN_MASK 0xfffff000
static inline void get_new_mmu_context(struct mm_struct *mm)
{
        extern void flush_tlb_all(void);
        extern void flush_cache_all(void);

        unsigned long mc = ++mmu_context_cache;

        if (!(mc & MMU_CONTEXT_ASID_MASK)) {
                /* We exhaust ASID of this version.
                   Flush all TLB and start new cycle. */
                flush_tlb_all();
                /* We have to flush all caches as ASIDs are
                   used in cache */
                flush_cache_all();
                /* Fix version if needed.
                   Note that we avoid version #0/asid #0 to distingush NO_CONTEXT. */
                if (!mc)
                        mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
        }
        mm->context = mc;
}
/*
* Get MMU context if needed.
*/
static __inline__ void get_mmu_context(struct mm_struct *mm)
{
        if (mm) {
                unsigned long mc = mmu_context_cache;
                /* Check if we have old version of context.
                   If it's old, we need to get new context with new version. */
                if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
                        get_new_mmu_context(mm);
        }
}
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mm->context = NO_CONTEXT;

        return 0;
}
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
        extern void flush_tlb_mm(struct mm_struct *mm);

        /* Well, at least free TLB entries */
        flush_tlb_mm(mm);
}

#endif /* __ASSEMBLY__ */
/* Common defines */
#define TLB_STEP 0x00000010
#define TLB_PTEH 0x00000000
#define TLB_PTEL 0x00000008
/* PTEH defines */
#define PTEH_ASID_SHIFT 2
#define PTEH_VALID 0x0000000000000001
#define PTEH_SHARED 0x0000000000000002
#define PTEH_MATCH_ASID 0x00000000000003ff
#ifndef __ASSEMBLY__
/* This has to be a common function because the next location to fill
* information is shared. */
extern void __do_tlb_refill(unsigned long address,
                            unsigned long long is_text_not_data, pte_t *pte);

/* Profiling counter. */
#ifdef CONFIG_SH64_PROC_TLB
extern unsigned long long calls_to_do_fast_page_fault;
#endif
static inline unsigned long get_asid(void)
{
        unsigned long long sr;

        asm volatile ("getcon " __SR ", %0\n\t"
                      : "=r" (sr));

        sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
        return (unsigned long) sr;
}
/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
        unsigned long long sr, pc;

        asm volatile ("getcon " __SR ", %0" : "=r" (sr));

        sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);

        /*
         * It is possible that this function may be inlined and so to avoid
         * the assembler reporting duplicate symbols we make use of the gas trick
         * of generating symbols using numerics and forward reference.
         */
        asm volatile ("movi 1, %1\n\t"
                      "shlli %1, 28, %1\n\t"
                      "or %0, %1, %1\n\t"
                      "putcon %1, " __SR "\n\t"
                      "putcon %0, " __SSR "\n\t"
                      "movi 1f, %1\n\t"
                      "ori %1, 1 , %1\n\t"
                      "putcon %1, " __SPC "\n\t"
                      "rte\n"
                      "1:\n\t"
                      : "=r" (sr), "=r" (pc) : "0" (sr));
}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static __inline__ void activate_context(struct mm_struct *mm)
{
        get_mmu_context(mm);
        set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}

static __inline__ void switch_mm(struct mm_struct *prev,
                                 struct mm_struct *next,
                                 struct task_struct *tsk)
{
        if (prev != next) {
                mmu_pdtp_cache = next->pgd;
                activate_context(next);
        }
}
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH64_MMU_CONTEXT_H */