Commit 23d0f2d1 authored by Hirokazu Takata, committed by Linus Torvalds

[PATCH] m32r: Update include/asm-m32r/mmu_context.h

This patch updates include/asm-m32r/mmu_context.h.

	* include/asm-m32r/mmu_context.h:
	- Add #ifdef __KERNEL__
	- Change __inline__ to inline for __KERNEL__ portion.
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3764eed4
#ifndef _ASM_M32R_MMU_CONTEXT_H #ifndef _ASM_M32R_MMU_CONTEXT_H
#define _ASM_M32R_MMU_CONTEXT_H #define _ASM_M32R_MMU_CONTEXT_H
/* $Id */ #ifdef __KERNEL__
#include <linux/config.h> #include <linux/config.h>
...@@ -40,7 +40,7 @@ extern unsigned long mmu_context_cache_dat[]; ...@@ -40,7 +40,7 @@ extern unsigned long mmu_context_cache_dat[];
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
#define enter_lazy_tlb(mm, tsk) do { } while (0) #define enter_lazy_tlb(mm, tsk) do { } while (0)
static __inline__ void get_new_mmu_context(struct mm_struct *mm) static inline void get_new_mmu_context(struct mm_struct *mm)
{ {
unsigned long mc = ++mmu_context_cache; unsigned long mc = ++mmu_context_cache;
...@@ -59,7 +59,7 @@ static __inline__ void get_new_mmu_context(struct mm_struct *mm) ...@@ -59,7 +59,7 @@ static __inline__ void get_new_mmu_context(struct mm_struct *mm)
/* /*
* Get MMU context if needed. * Get MMU context if needed.
*/ */
static __inline__ void get_mmu_context(struct mm_struct *mm) static inline void get_mmu_context(struct mm_struct *mm)
{ {
if (mm) { if (mm) {
unsigned long mc = mmu_context_cache; unsigned long mc = mmu_context_cache;
...@@ -75,7 +75,7 @@ static __inline__ void get_mmu_context(struct mm_struct *mm) ...@@ -75,7 +75,7 @@ static __inline__ void get_mmu_context(struct mm_struct *mm)
* Initialize the context related info for a new mm_struct * Initialize the context related info for a new mm_struct
* instance. * instance.
*/ */
static __inline__ int init_new_context(struct task_struct *tsk, static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm) struct mm_struct *mm)
{ {
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
...@@ -97,12 +97,12 @@ static __inline__ int init_new_context(struct task_struct *tsk, ...@@ -97,12 +97,12 @@ static __inline__ int init_new_context(struct task_struct *tsk,
*/ */
#define destroy_context(mm) do { } while (0) #define destroy_context(mm) do { } while (0)
static __inline__ void set_asid(unsigned long asid) static inline void set_asid(unsigned long asid)
{ {
*(volatile unsigned long *)MASID = (asid & MMU_CONTEXT_ASID_MASK); *(volatile unsigned long *)MASID = (asid & MMU_CONTEXT_ASID_MASK);
} }
static __inline__ unsigned long get_asid(void) static inline unsigned long get_asid(void)
{ {
unsigned long asid; unsigned long asid;
...@@ -116,13 +116,13 @@ static __inline__ unsigned long get_asid(void) ...@@ -116,13 +116,13 @@ static __inline__ unsigned long get_asid(void)
* After we have set current->mm to a new value, this activates * After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings. * the context for the new mm so we see the new mappings.
*/ */
static __inline__ void activate_context(struct mm_struct *mm) static inline void activate_context(struct mm_struct *mm)
{ {
get_mmu_context(mm); get_mmu_context(mm);
set_asid(mm_context(mm) & MMU_CONTEXT_ASID_MASK); set_asid(mm_context(mm) & MMU_CONTEXT_ASID_MASK);
} }
static __inline__ void switch_mm(struct mm_struct *prev, static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next, struct task_struct *tsk) struct mm_struct *next, struct task_struct *tsk)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -165,5 +165,6 @@ static __inline__ void switch_mm(struct mm_struct *prev, ...@@ -165,5 +165,6 @@ static __inline__ void switch_mm(struct mm_struct *prev,
#endif /* not __ASSEMBLY__ */ #endif /* not __ASSEMBLY__ */
#endif /* _ASM_M32R_MMU_CONTEXT_H */ #endif /* __KERNEL__ */
#endif /* _ASM_M32R_MMU_CONTEXT_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment