Kirill Smelkov / linux / Commits / e37d5b22

Commit e37d5b22, authored Jan 04, 2004 by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk
into home.osdl.org:/home/torvalds/v2.5/linux

parents fa751d5c 45b38a50

Showing 5 changed files with 96 additions and 66 deletions (+96 -66):
arch/arm/kernel/dma-isa.c     +1  -0
arch/arm/kernel/fiq.c         +12 -12
arch/arm/kernel/module.c      +4  -3
arch/arm/mm/mm-armv.c         +72 -45
include/asm-arm/pgtable.h     +7  -6
arch/arm/kernel/dma-isa.c

@@ -85,6 +85,7 @@ static void isa_enable_dma(dmach_t channel, dma_t *dma)
 			break;
 		default:
+			direction = PCI_DMA_NONE;
 			break;
 		}
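The one-line fix ensures `direction` holds a defined value on every path: previously the `default:` arm of the mode switch in isa_enable_dma() fell through to `break` without assigning it. A minimal stand-alone model of the pattern (names and mode values here are illustrative stand-ins, not the kernel's definitions):

    #include <stdio.h>

    /* Hypothetical directions, modeling the shape of the fixed switch. */
    enum dir { DIR_FROM_DEVICE, DIR_TO_DEVICE, DIR_NONE };

    static enum dir pick_direction(int dmamode)
    {
            enum dir direction;     /* deliberately uninitialized, as in the original */

            switch (dmamode) {
            case 0:
                    direction = DIR_FROM_DEVICE;
                    break;
            case 1:
                    direction = DIR_TO_DEVICE;
                    break;
            default:
                    direction = DIR_NONE;   /* the added line: every path now assigns */
                    break;
            }
            return direction;
    }

    int main(void)
    {
            printf("%d\n", pick_direction(7));      /* prints 2 (DIR_NONE) */
            return 0;
    }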
arch/arm/kernel/fiq.c

@@ -112,12 +112,12 @@ void set_fiq_regs(struct pt_regs *regs)
 {
 	register unsigned long tmp, tmp2;
 	__asm__ volatile (
-	"mrs	%0, cpsr
-	mov	%1, %3
-	msr	cpsr_c, %1	@ select FIQ mode
-	mov	r0, r0
-	ldmia	%2, {r8 - r14}
-	msr	cpsr_c, %0	@ return to SVC mode
+	"mrs	%0, cpsr\n\
+	mov	%1, %3\n\
+	msr	cpsr_c, %1	@ select FIQ mode\n\
+	mov	r0, r0\n\
+	ldmia	%2, {r8 - r14}\n\
+	msr	cpsr_c, %0	@ return to SVC mode\n\
 	mov	r0, r0"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)

@@ -132,12 +132,12 @@ void get_fiq_regs(struct pt_regs *regs)
 {
 	register unsigned long tmp, tmp2;
 	__asm__ volatile (
-	"mrs	%0, cpsr
-	mov	%1, %3
-	msr	cpsr_c, %1	@ select FIQ mode
-	mov	r0, r0
-	stmia	%2, {r8 - r14}
-	msr	cpsr_c, %0	@ return to SVC mode
+	"mrs	%0, cpsr\n\
+	mov	%1, %3\n\
+	msr	cpsr_c, %1	@ select FIQ mode\n\
+	mov	r0, r0\n\
+	stmia	%2, {r8 - r14}\n\
+	msr	cpsr_c, %0	@ return to SVC mode\n\
 	mov	r0, r0"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)
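Both hunks are lexical rather than functional: each line of the inline-asm string gains an explicit `\n` plus a backslash line continuation, since GCC 3.x dropped support for string literals containing raw newlines. The operands, constraints, and instructions are unchanged. A small demonstration of the spliced form (illustrative, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            /* One string literal spanning two source lines: '\n' is data,
             * the trailing backslash splices the lines during preprocessing.
             * Leading whitespace on the continuation line stays inside the
             * string, which the kernel's assembler text tolerates. */
            const char *s = "mrs	%0, cpsr\n\
    mov	%1, %3\n";
            fputs(s, stdout);       /* prints the two instruction lines */
            return 0;
    }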
arch/arm/kernel/module.c

@@ -123,9 +123,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (offset & 3 ||
 			    offset <= (s32)0xfc000000 ||
 			    offset >= (s32)0x04000000) {
-				printk(KERN_ERR "%s: unable to fixup "
-				       "relocation: out of range\n",
-				       module->name);
+				printk(KERN_ERR
+				       "%s: relocation out of range, section "
+				       "%d reloc %d sym '%s'\n", module->name,
+				       relindex, i, strtab + sym->st_name);
 				return -ENOEXEC;
 			}
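The reworked printk carries enough context to locate the failing relocation (this check is the ±32MB branch-range test on the offset), identifying the section, relocation index, and symbol. With hypothetical values the message would read something like:

    mymodule: relocation out of range, section 3 reloc 17 sym 'some_function'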
arch/arm/mm/mm-armv.c

@@ -10,6 +10,7 @@
  * Page table sludge for ARM v3 and v4 processor architectures.
  */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>

@@ -25,23 +26,52 @@
 #include <asm/mach/map.h>
 
-static unsigned int cachepolicy __initdata = PMD_SECT_WB;
+#define CPOLICY_UNCACHED	0
+#define CPOLICY_BUFFERED	1
+#define CPOLICY_WRITETHROUGH	2
+#define CPOLICY_WRITEBACK	3
+#define CPOLICY_WRITEALLOC	4
+
+static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
+pgprot_t pgprot_kernel;
+
+EXPORT_SYMBOL(pgprot_kernel);
 
 struct cachepolicy {
-	char		*policy;
+	const char	policy[16];
 	unsigned int	cr_mask;
 	unsigned int	pmd;
+	unsigned int	pte;
 };
 
 static struct cachepolicy cache_policies[] __initdata = {
-	{ "uncached",		CR_W|CR_C,	PMD_SECT_UNCACHED },
-	{ "buffered",		CR_C,		PMD_SECT_BUFFERED },
-	{ "writethrough",	0,		PMD_SECT_WT },
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-	{ "writeback",		0,		PMD_SECT_WB },
-	{ "writealloc",		0,		PMD_SECT_WBWA }
-#endif
+	{
+		.policy		= "uncached",
+		.cr_mask	= CR_W|CR_C,
+		.pmd		= PMD_SECT_UNCACHED,
+		.pte		= 0,
+	}, {
+		.policy		= "buffered",
+		.cr_mask	= CR_C,
+		.pmd		= PMD_SECT_BUFFERED,
+		.pte		= PTE_BUFFERABLE,
+	}, {
+		.policy		= "writethrough",
+		.cr_mask	= 0,
+		.pmd		= PMD_SECT_WT,
+		.pte		= PTE_CACHEABLE,
+	}, {
+		.policy		= "writeback",
+		.cr_mask	= 0,
+		.pmd		= PMD_SECT_WB,
+		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+	}, {
+		.policy		= "writealloc",
+		.cr_mask	= 0,
+		.pmd		= PMD_SECT_WBWA,
+		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+	}
 };

@@ -58,7 +88,7 @@ static void __init early_cachepolicy(char **p)
 		int len = strlen(cache_policies[i].policy);
 
 		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
-			cachepolicy = cache_policies[i].pmd;
+			cachepolicy = i;
 			cr_alignment &= ~cache_policies[i].cr_mask;
 			cr_no_alignment &= ~cache_policies[i].cr_mask;
 			*p += len;

@@ -306,9 +336,23 @@ static struct mem_types mem_types[] __initdata = {
  */
 static void __init build_mem_type_table(void)
 {
+	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	int cpu_arch = cpu_architecture();
-	const char *policy;
+	int i;
+
+#if defined(CONFIG_CPU_DCACHE_DISABLE)
+	if (cachepolicy > CPOLICY_BUFFERED)
+		cachepolicy = CPOLICY_BUFFERED;
+#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+	if (cachepolicy > CPOLICY_WRITETHROUGH)
+		cachepolicy = CPOLICY_WRITETHROUGH;
+#endif
+	if (cpu_arch < CPU_ARCH_ARMv5) {
+		if (cachepolicy >= CPOLICY_WRITEALLOC)
+			cachepolicy = CPOLICY_WRITEBACK;
+		ecc_mask = 0;
+	}
 
 	/*
 	 * ARMv6 and above have extended page tables.

@@ -327,56 +371,39 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 	}
 
-	/*
-	 * ARMv6 can map the vectors as write-through.
-	 */
-	if (cpu_arch >= CPU_ARCH_ARMv6)
-		mem_types[MT_VECTORS].prot_pte |= PTE_CACHEABLE;
-	else
-		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
+	cp = &cache_policies[cachepolicy];
 
 	/*
 	 * ARMv5 and higher can use ECC memory.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv5) {
-		mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
-		mem_types[MT_MEMORY].prot_sect |= ecc_mask;
+		mem_types[MT_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
 	} else {
+		mem_types[MT_VECTORS].prot_pte |= cp->pte;
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-		if (cachepolicy == PMD_SECT_WBWA)
-			cachepolicy = PMD_SECT_WB;
 		ecc_mask = 0;
 	}
 
-	mem_types[MT_MEMORY].prot_sect |= cachepolicy;
+	mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
+	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 
-	switch (cachepolicy) {
-	default:
-	case PMD_SECT_UNCACHED:
-		policy = "uncached";
-		break;
-	case PMD_SECT_BUFFERED:
-		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE;
-		policy = "buffered";
-		break;
+	for (i = 0; i < 16; i++) {
+		unsigned long v = pgprot_val(protection_map[i]);
+		v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
+		protection_map[i] = __pgprot(v);
+	}
+
+	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+				 L_PTE_DIRTY | L_PTE_WRITE |
+				 L_PTE_EXEC | cp->pte);
+
+	switch (cp->pmd) {
 	case PMD_SECT_WT:
-		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
-		policy = "write through";
 		break;
 	case PMD_SECT_WB:
-		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
-		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
-		policy = "write back";
-		break;
 	case PMD_SECT_WBWA:
-		mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
-		policy = "write back, write allocate";
 		break;
 	}
 
 	printk("Memory policy: ECC %sabled, Data cache %s\n",
-		ecc_mask ? "en" : "dis", policy);
+		ecc_mask ? "en" : "dis", cp->policy);
 }
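The policy names in cache_policies[] are the values accepted by the ARM `cachepolicy=` boot parameter, which early_cachepolicy() (third hunk) matches by prefix; after this patch it records the table index rather than the raw PMD section bits, so build_mem_type_table() can pick up the matching pmd/pte pair. A stand-alone model of the lookup (the kernel uses memcmp() against the boot command line; strncmp() here keeps the demo safe for short inputs):

    #include <stdio.h>
    #include <string.h>

    static const char *policies[] = {
            "uncached", "buffered", "writethrough", "writeback", "writealloc",
    };

    static int lookup(const char *arg)
    {
            unsigned int i;

            for (i = 0; i < sizeof(policies) / sizeof(policies[0]); i++) {
                    size_t len = strlen(policies[i]);

                    if (strncmp(arg, policies[i], len) == 0)
                            return (int)i;  /* the patch stores this index */
            }
            return -1;
    }

    int main(void)
    {
            printf("%d\n", lookup("writethrough")); /* prints 2 (CPOLICY_WRITETHROUGH) */
            return 0;
    }

Booting with, e.g., `cachepolicy=writethrough` would thus select index 2 and clear that entry's cr_mask bits from cr_alignment.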
include/asm-arm/pgtable.h

@@ -152,16 +152,16 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 /*
  * The following macros handle the cache and bufferable bits...
  */
-#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG
-#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
+#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
+#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC
+
+extern pgprot_t pgprot_kernel;
 
 #define PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
 #define PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
 #define PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
 #define PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
-#define PAGE_KERNEL	__pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC)
-
-#define _PAGE_CHG_MASK	(PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)
+#define PAGE_KERNEL	pgprot_kernel
 
 #endif /* __ASSEMBLY__ */

@@ -323,7 +323,8 @@ static inline pte_t *pmd_page_kernel(pmd_t pmd)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
+	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
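The pte_modify() change complements the mm-armv.c rework: cacheability is now baked into a PTE when it is created (via pgprot_kernel and the adjusted protection_map), so pte_modify() must no longer let a new protection overwrite it; only the access bits EXEC/WRITE/USER are taken from newprot. A stand-alone model with made-up bit positions (the real L_PTE_* layout lives in this header):

    #include <stdio.h>

    /* Hypothetical bit values, for illustration only. */
    #define L_PTE_USER       (1UL << 4)
    #define L_PTE_WRITE      (1UL << 5)
    #define L_PTE_EXEC       (1UL << 6)
    #define L_PTE_CACHEABLE  (1UL << 7)

    static unsigned long pte_modify_model(unsigned long pte, unsigned long newprot)
    {
            /* Mirrors the patched pte_modify(): only the access bits come
             * from newprot; cacheability and all other bits are preserved. */
            const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
            return (pte & ~mask) | (newprot & mask);
    }

    int main(void)
    {
            unsigned long pte = L_PTE_CACHEABLE | L_PTE_WRITE; /* cacheable, writable */
            unsigned long ro  = L_PTE_USER;                    /* user read-only prot */

            pte = pte_modify_model(pte, ro);
            printf("%#lx\n", pte);  /* prints 0x90: WRITE dropped, USER gained,
                                       CACHEABLE untouched */
            return 0;
    }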