Kirill Smelkov / linux · Commits · 1b1c7409

Commit 1b1c7409, authored Feb 20, 2013 by Russell King

    Merge branch 'misc' into for-linus

    Conflicts:
	arch/arm/include/asm/memory.h

Parents: 573f8c8d, b28748fb
Showing 23 changed files with 216 additions and 412 deletions (+216 -412).
MAINTAINERS                           +9    -1
arch/arm/Kconfig                      +3    -0
arch/arm/crypto/aes-armv4.S           +20   -44
arch/arm/crypto/sha1-armv4-large.S    +9    -15
arch/arm/include/asm/mach/pci.h       +1    -0
arch/arm/include/asm/memory.h         +4    -4
arch/arm/include/asm/outercache.h     +1    -0
arch/arm/include/asm/spinlock.h       +1    -15
arch/arm/kernel/bios32.c              +6    -3
arch/arm/kernel/smp.c                 +8    -23
arch/arm/mach-versatile/core.c        +14   -1
arch/arm/mach-versatile/pci.c         +6    -5
arch/arm/mm/Makefile                  +1    -1
arch/arm/mm/context.c                 +3    -0
arch/arm/mm/ioremap.c                 +93   -42
arch/arm/mm/mm.h                      +12   -0
arch/arm/mm/mmu.c                     +17   -19
arch/arm/mm/proc-macros.S             +5    -0
arch/arm/mm/proc-v6.S                 +1    -1
arch/arm/mm/proc-v7-2level.S          +1    -1
arch/arm/mm/proc-v7-3level.S          +1    -1
arch/arm/mm/vmregion.c                +0    -205
arch/arm/mm/vmregion.h                +0    -31
MAINTAINERS

@@ -670,8 +670,16 @@ F: drivers/input/serio/ambakmi.*
 F:	include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-S:	Orphan
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
 F:	drivers/mmc/host/mmci.*
 F:	include/linux/amba/mmci.h
 
+ARM PRIMECELL UART PL010 AND PL011 DRIVERS
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	drivers/tty/serial/amba-pl01*.c
+F:	include/linux/amba/serial.h
+
 ARM PRIMECELL BUS SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
arch/arm/Kconfig

@@ -1655,6 +1655,9 @@ config HZ
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
 	default 100
 
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
+
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode"
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
arch/arm/crypto/aes-armv4.S

@@ -34,8 +34,9 @@
 @ A little glue here to select the correct code below for the ARM CPU
 @ that is being targetted.
 
+#include <linux/linkage.h>
+
 .text
-.code	32
 
 .type	AES_Te,%object
 .align	5

@@ -145,10 +146,8 @@ AES_Te:
 @ void AES_encrypt(const unsigned char *in, unsigned char *out,
 @ 		 const AES_KEY *key) {
-.global AES_encrypt
-.type   AES_encrypt,%function
 .align	5
-AES_encrypt:
+ENTRY(AES_encrypt)
 	sub	r3,pc,#8		@ AES_encrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp

@@ -239,15 +238,8 @@ AES_encrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_encrypt,.-AES_encrypt
+ENDPROC(AES_encrypt)
 
 .type   _armv4_AES_encrypt,%function
 .align	2

@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
 	ldr	pc,[sp],#4	@ pop and return
 .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
 
-.global private_AES_set_encrypt_key
-.type   private_AES_set_encrypt_key,%function
 .align	5
-private_AES_set_encrypt_key:
+ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
 	sub	r3,pc,#8		@ AES_set_encrypt_key
 	teq	r0,#0

@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
 .Ldone:	mov	r0,#0
 	ldmia   sp!,{r4-r12,lr}
-.Labrt:	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.Labrt:	mov	pc,lr
+ENDPROC(private_AES_set_encrypt_key)
 
-.global private_AES_set_decrypt_key
-.type   private_AES_set_decrypt_key,%function
 .align	5
-private_AES_set_decrypt_key:
+ENTRY(private_AES_set_decrypt_key)
 	str	lr,[sp,#-4]!            @ push lr
 #if 0
 	@ kernel does both of these in setkey so optimise this bit out by

@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
 	bne	.Lmix
 
 	mov	r0,#0
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+ENDPROC(private_AES_set_decrypt_key)
 
 .type	AES_Td,%object
 .align	5

@@ -862,10 +841,8 @@ AES_Td:
 @ void AES_decrypt(const unsigned char *in, unsigned char *out,
 @ 		 const AES_KEY *key) {
-.global AES_decrypt
-.type   AES_decrypt,%function
 .align	5
-AES_decrypt:
+ENTRY(AES_decrypt)
 	sub	r3,pc,#8		@ AES_decrypt
 	stmdb   sp!,{r1,r4-r12,lr}
 	mov	r12,r0		@ inp

@@ -956,15 +933,8 @@ AES_decrypt:
 	strb	r6,[r12,#14]
 	strb	r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia   sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_decrypt,.-AES_decrypt
+ENDPROC(AES_decrypt)
 
 .type   _armv4_AES_decrypt,%function
 .align	2

@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r1,lsr#8
 
 	ldrb	r7,[r10,r7]		@ Td4[s1>>0]
-	ldrb	r1,[r10,r1,lsr#24]	@ Td4[s1>>24]
+ ARM(	ldrb	r1,[r10,r1,lsr#24]  )	@ Td4[s1>>24]
+ THUMB(	add	r1,r10,r1,lsr#24   )	@ Td4[s1>>24]
+ THUMB(	ldrb	r1,[r1]		   )
 	ldrb	r8,[r10,r8]		@ Td4[s1>>16]
 	eor	r0,r7,r0,lsl#24
 	ldrb	r9,[r10,r9]		@ Td4[s1>>8]

@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
 	ldrb	r8,[r10,r8]		@ Td4[s2>>0]
 	and	r9,lr,r2,lsr#16
 
-	ldrb	r2,[r10,r2,lsr#24]	@ Td4[s2>>24]
+ ARM(	ldrb	r2,[r10,r2,lsr#24]  )	@ Td4[s2>>24]
+ THUMB(	add	r2,r10,r2,lsr#24   )	@ Td4[s2>>24]
+ THUMB(	ldrb	r2,[r2]		   )
 	eor	r0,r0,r7,lsl#8
 	ldrb	r9,[r10,r9]		@ Td4[s2>>16]
 	eor	r1,r8,r1,lsl#16

@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
 	and	r9,lr,r3		@ i2
 
 	ldrb	r9,[r10,r9]		@ Td4[s3>>0]
-	ldrb	r3,[r10,r3,lsr#24]	@ Td4[s3>>24]
+ ARM(	ldrb	r3,[r10,r3,lsr#24]  )	@ Td4[s3>>24]
+ THUMB(	add	r3,r10,r3,lsr#24   )	@ Td4[s3>>24]
+ THUMB(	ldrb	r3,[r3]		   )
 	eor	r0,r0,r7,lsl#16
 	ldr	r7,[r11,#0]
 	eor	r1,r1,r8,lsl#8
arch/arm/crypto/sha1-armv4-large.S

@@ -51,13 +51,12 @@
 @ Profiler-assisted and platform-specific optimization resulted in 10%
 @ improvement on Cortex A8 core and 12.2 cycles per byte.
 
-.text
+#include <linux/linkage.h>
 
-.global	sha1_block_data_order
-.type	sha1_block_data_order,%function
+.text
 
 .align	2
-sha1_block_data_order:
+ENTRY(sha1_block_data_order)
 	stmdb	sp!,{r4-r12,lr}
 	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1
 	ldmia	r0,{r3,r4,r5,r6,r7}

@@ -194,7 +193,7 @@ sha1_block_data_order:
 	eor	r10,r10,r7,ror#2		@ F_00_19(B,C,D)
 	str	r9,[r14,#-4]!
 	add	r3,r3,r10			@ E+=F_00_19(B,C,D)
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_00_15		@ [((11+4)*5+2)*3]
 #if __ARM_ARCH__<7
 	ldrb	r10,[r1,#2]

@@ -374,7 +373,9 @@ sha1_block_data_order:
 	@ F_xx_xx
 	add	r3,r3,r9			@ E+=X[i]
 	add	r3,r3,r10			@ E+=F_20_39(B,C,D)
-	teq	r14,sp			@ preserve carry
+ ARM(	teq	r14,sp		)	@ preserve carry
+ THUMB(	mov	r11,sp		)
+ THUMB(	teq	r14,r11		)	@ preserve carry
 	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
 	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes

@@ -466,7 +467,7 @@ sha1_block_data_order:
 	add	r3,r3,r9			@ E+=X[i]
 	add	r3,r3,r10			@ E+=F_40_59(B,C,D)
 	add	r3,r3,r11,ror#2
-	teq	r14,sp
+	cmp	r14,sp
 	bne	.L_40_59		@ [+((12+5)*5+2)*4]
 
 	ldr	r8,.LK_60_79

@@ -485,19 +486,12 @@ sha1_block_data_order:
 	teq	r1,r2
 	bne	.Lloop			@ [+18], total 1307
 
-#if __ARM_ARCH__>=5
 	ldmia	sp!,{r4-r12,pc}
-#else
-	ldmia	sp!,{r4-r12,lr}
-	tst	lr,#1
-	moveq	pc,lr			@ be binary compatible with V4, yet
-	.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
 
 .align	2
 .LK_00_19:	.word	0x5a827999
 .LK_20_39:	.word	0x6ed9eba1
 .LK_40_59:	.word	0x8f1bbcdc
 .LK_60_79:	.word	0xca62c1d6
-.size	sha1_block_data_order,.-sha1_block_data_order
+ENDPROC(sha1_block_data_order)
 .asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
 .align	2
arch/arm/include/asm/mach/pci.h

@@ -23,6 +23,7 @@ struct hw_pci {
 #endif
 	struct pci_ops	*ops;
 	int		nr_controllers;
+	void		**private_data;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
 	void		(*preinit)(void);
arch/arm/include/asm/memory.h

@@ -36,23 +36,23 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
 #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
-#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
+#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
  */
-#define TASK_SIZE_26		UL(0x04000000)
+#define TASK_SIZE_26		(UL(1) << 26)
 
 /*
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
 #ifndef CONFIG_THUMB2_KERNEL
-#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
 #else
 /* smaller range for Thumb-2 symbols relocation (2^24)*/
-#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
 #endif
 
 #if TASK_SIZE > MODULES_VADDR
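The memory.h hunk above only swaps numeric literals for the equivalent named constants from <linux/sizes.h>, so the resulting addresses are unchanged. A minimal, hypothetical build-time check (not part of the commit) confirming the values line up:

#include <linux/bug.h>
#include <linux/sizes.h>

/* Illustrative only: the three replacements encode exactly the old numbers. */
static inline void memory_h_constants_check(void)
{
	BUILD_BUG_ON(SZ_16M != 0x01000000);		/* TASK_SIZE / MODULES_VADDR offset */
	BUILD_BUG_ON(SZ_8M != 0x00800000);		/* Thumb-2 MODULES_VADDR offset */
	BUILD_BUG_ON((1UL << 26) != 0x04000000);	/* TASK_SIZE_26 */
}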
arch/arm/include/asm/outercache.h

@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 static inline void outer_flush_all(void) { }
 static inline void outer_inv_all(void) { }
 static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
 
 #endif
arch/arm/include/asm/spinlock.h

@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
-	u32 slock;
-
 	smp_mb();
-
-	__asm__ __volatile__(
-"	mov	%1, #1\n"
-"1:	ldrex	%0, [%2]\n"
-"	uadd16	%0, %0, %1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock)
-	: "cc");
-
+	lock->tickets.owner++;
 	dsb_sev();
 }
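The spinlock.h change is safe because these ARM spinlocks are ticket locks: only the current holder ever advances the owner field, so unlock needs no exclusive load/store sequence, just an ordinary increment between the existing barriers. A rough user-space sketch of the same idea (hypothetical types, not the kernel's arch_spinlock_t):

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
	_Atomic uint16_t next;		/* ticket handed to the next acquirer */
	_Atomic uint16_t owner;		/* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
	uint16_t t = atomic_fetch_add(&l->next, 1);	/* contended: needs an atomic RMW */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != t)
		;					/* spin until our ticket is served */
}

static void ticket_unlock(struct ticket_lock *l)
{
	/* Only the holder writes owner, so a plain release store is enough. */
	atomic_store_explicit(&l->owner,
			      atomic_load_explicit(&l->owner, memory_order_relaxed) + 1,
			      memory_order_release);
}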
arch/arm/kernel/bios32.c

@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	return irq;
 }
 
-static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 {
 	int ret;
 	struct pci_host_bridge_window *window;

@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 	return 0;
 }
 
-static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 {
 	struct pci_sys_data *sys = NULL;
 	int ret;

@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		sys->map_irq = hw->map_irq;
 		INIT_LIST_HEAD(&sys->resources);
 
+		if (hw->private_data)
+			sys->private_data = hw->private_data[nr];
+
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {

@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 	}
 }
 
-void __init pci_common_init(struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
 {
 	struct pci_sys_data *sys;
 	LIST_HEAD(head);
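The bios32.c hunks drop __init from the PCI setup path and add per-controller private data: pcibios_init_hw() now copies hw->private_data[nr] into each pci_sys_data. A hedged sketch of how a board file might use the new field (all names here are hypothetical):

#include <asm/mach/pci.h>

/* Hypothetical board fragment: one opaque pointer per PCI controller. */
struct my_port { void __iomem *base; };

static struct my_port my_ports[2];
static void *my_port_ptrs[2] = { &my_ports[0], &my_ports[1] };

static struct hw_pci my_pci = {
	.nr_controllers	= 2,
	/* pcibios_init_hw() copies my_port_ptrs[nr] into sys->private_data */
	.private_data	= my_port_ptrs,
};

Each controller's setup() and map_irq() callbacks can then recover their own port through sys->private_data.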
arch/arm/kernel/smp.c

@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
 		smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-	if (smp_ops.smp_prepare_cpus)
-		smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-	if (smp_ops.smp_secondary_init)
-		smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	if (smp_ops.smp_boot_secondary)

@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
 	return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-	if (smp_ops.cpu_die)
-		smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
 	if (smp_ops.cpu_disable)

@@ -257,7 +239,8 @@ void __ref cpu_die(void)
 	 * actual CPU shutdown procedure is at least platform (if not
 	 * CPU) specific.
 	 */
-	platform_cpu_die(cpu);
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
 
 	/*
 	 * Do not return to the idle loop - jump back to the secondary

@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * Give the platform a chance to do its own initialisation.
 	 */
-	platform_secondary_init(cpu);
+	if (smp_ops.smp_secondary_init)
+		smp_ops.smp_secondary_init(cpu);
 
 	notify_cpu_starting(cpu);

@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	/*
 	 * Initialise the present map, which describes the set of CPUs
 	 * actually populated at the present time. A platform should
-	 * re-initialize the map in platform_smp_prepare_cpus() if
-	 * present != possible (e.g. physical hotplug).
+	 * re-initialize the map in the platforms smp_prepare_cpus()
+	 * if present != possible (e.g. physical hotplug).
 	 */
 	init_cpu_present(cpu_possible_mask);

@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		 * Initialise the SCU if there are more than one CPU
 		 * and let them know where to start.
 		 */
-		platform_smp_prepare_cpus(max_cpus);
+		if (smp_ops.smp_prepare_cpus)
+			smp_ops.smp_prepare_cpus(max_cpus);
 	}
 }
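The smp.c hunks delete the platform_*() wrappers and call the optional smp_ops hooks directly, with the NULL check moved to each call site since a platform may leave any hook unset. Reduced to a minimal sketch (hypothetical struct and names, same shape as the calls above):

struct my_smp_ops {
	void (*smp_prepare_cpus)(unsigned int max_cpus);	/* optional */
	void (*smp_secondary_init)(unsigned int cpu);		/* optional */
	void (*cpu_die)(unsigned int cpu);			/* optional */
};

static struct my_smp_ops my_ops;	/* filled in (or not) by the platform */

static void my_secondary_start(unsigned int cpu)
{
	/* Before this commit, platform_secondary_init(cpu) wrapped exactly this check. */
	if (my_ops.smp_secondary_init)
		my_ops.smp_secondary_init(cpu);
}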
arch/arm/mach-versatile/core.c

@@ -36,6 +36,7 @@
 #include <linux/gfp.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
+#include <linux/bitops.h>
 
 #include <asm/irq.h>
 #include <asm/hardware/arm_timer.h>

@@ -65,16 +66,28 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
+/* These PIC IRQs are valid in each configuration */
+#define PIC_VALID_ALL	BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
+			BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
+			BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
+			BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
+			BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
+			BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
+			BIT(SIC_INT_PCI3)
 #if 1
 #define IRQ_MMCI0A	IRQ_VICSOURCE22
 #define IRQ_AACI	IRQ_VICSOURCE24
 #define IRQ_ETH		IRQ_VICSOURCE25
 #define PIC_MASK	0xFFD00000
+#define PIC_VALID	PIC_VALID_ALL
 #else
 #define IRQ_MMCI0A	IRQ_SIC_MMCI0A
 #define IRQ_AACI	IRQ_SIC_AACI
 #define IRQ_ETH		IRQ_SIC_ETH
 #define PIC_MASK	0
+#define PIC_VALID	PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
+			BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
+			BIT(SIC_INT_ETH)
 #endif
 
 /* Lookup table for finding a DT node that represents the vic instance */

@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
 			      VERSATILE_SIC_BASE);
 
 	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
-		IRQ_VICSOURCE31, ~PIC_MASK, np);
+		IRQ_VICSOURCE31, PIC_VALID, np);
 
 	/*
 	 * Interrupts on secondary controller from 0 to 8 are routed to
arch/arm/mach-versatile/pci.c

@@ -23,6 +23,7 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>

@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	int irq;
 
 	/* slot,  pin,	irq
-	 *  24     1     27
-	 *  25     1     28
-	 *  26     1     29
-	 *  27     1     30
+	 *  24     1     IRQ_SIC_PCI0
+	 *  25     1     IRQ_SIC_PCI1
+	 *  26     1     IRQ_SIC_PCI2
+	 *  27     1     IRQ_SIC_PCI3
	 */
-	irq = 27 + ((slot - 24 + pin - 1) & 3);
+	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
 
 	return irq;
 }
arch/arm/mm/Makefile

@@ -6,7 +6,7 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
-				   mmap.o pgd.o mmu.o vmregion.o
+				   mmap.o pgd.o mmu.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
arch/arm/mm/context.c

@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accesed by
+ * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
arch/arm/mm/ioremap.c

@@ -39,6 +39,70 @@
 #include <asm/mach/pci.h>
 #include "mm.h"
 
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+			size_t size, unsigned int mtype)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+
+		if (vm->phys_addr > paddr ||
+			paddr + size - 1 > vm->phys_addr + vm->size - 1)
+			continue;
+
+		return svm;
+	}
+
+	return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+
+		/* static_vmlist is ascending order */
+		if (vm->addr > vaddr)
+			break;
+
+		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+			return svm;
+	}
+
+	return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+	struct static_vm *curr_svm;
+	struct vm_struct *vm;
+	void *vaddr;
+
+	vm = &svm->vm;
+	vm_area_add_early(vm);
+	vaddr = vm->addr;
+
+	list_for_each_entry(curr_svm, &static_vmlist, list) {
+		vm = &curr_svm->vm;
+
+		if (vm->addr > vaddr)
+			break;
+	}
+	list_add_tail(&svm->list, &curr_svm->list);
+}
+
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {

@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif

@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+

@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+		err = ioremap_page_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {

@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
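Taken together, the ioremap.c hunks move ARM's boot-time (static) mappings off the generic vmlist and onto the sorted static_vmlist defined above, so neither the reuse path in __arm_ioremap_pfn_caller() nor the leave-it-alone check in __iounmap() needs vmlist_lock any more. A condensed paraphrase of the new reuse path (hypothetical helper name, not a drop-in function, just the shape of the lookup):

/* Hypothetical condensation of the lookup done by __arm_ioremap_pfn_caller(). */
static void __iomem *reuse_static_mapping(phys_addr_t paddr, size_t size,
					  unsigned int mtype, unsigned long offset)
{
	struct static_vm *svm = find_static_vm_paddr(paddr, size, mtype);
	unsigned long addr;

	if (!svm)
		return NULL;	/* caller falls back to creating a fresh mapping */

	addr = (unsigned long)svm->vm.addr;
	addr += paddr - svm->vm.phys_addr;	/* offset of paddr inside the static window */
	return (void __iomem *)(offset + addr);
}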
arch/arm/mm/mm.h

 #ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
 
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;

@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page)
 /* consistent regions used by dma_alloc_attrs() */
 #define VM_ARM_DMA_CONSISTENT	0x20000000
 
+struct static_vm {
+	struct vm_struct vm;
+	struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
arch/arm/mm/mmu.c

@@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }

@@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE

@@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;

@@ -857,19 +862,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
-	}
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
arch/arm/mm/proc-macros.S

@@ -38,9 +38,14 @@
 
 /*
  * mmid - get context id from mm pointer (mm->context.id)
+ * note, this field is 64bit, so in big-endian the two words are swapped too.
  */
 	.macro	mmid, rd, rn
+#ifdef __ARMEB__
+	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
 	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+#endif
 	.endm
 
 /*
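The mmid change above exists because mm->context.id is a 64-bit field, as the new comment notes: a 32-bit load of the ASID must read the low word, which sits at byte offset 0 on little-endian but at offset 4 on big-endian. A small, hypothetical C illustration of that layout point (not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t id = 0x1122334455667788ULL;	/* stand-in for mm->context.id */
	uint32_t low;
	size_t off = (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) ? 4 : 0;

	/* The low 32 bits live at offset 0 (LE) or offset 4 (BE), hence #MM_CONTEXT_ID + 4. */
	memcpy(&low, (unsigned char *)&id + off, sizeof(low));
	printf("low 32 bits: 0x%08x\n", low);	/* 0x55667788 on either endianness */
	return 0;
}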
arch/arm/mm/proc-v6.S

@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
 ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
arch/arm/mm/proc-v7-2level.S

@@ -40,7 +40,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
arch/arm/mm/proc-v7-3level.S

@@ -47,7 +47,7 @@
  */
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	and	r3, r1, #0xff
 	mov	r3, r3, lsl #(48 - 32)		@ ASID
 	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
arch/arm/mm/vmregion.c (deleted, 100644 → 0)

The whole file is removed by this commit; its previous contents were:

#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "vmregion.h"

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vmregion	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vmregion_alloc with an appropriate
 * struct vmregion head (eg):
 *
 *  struct vmregion vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vmregion_alloc().
 */

struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
		   size_t size, gfp_t gfp, const void *caller)
{
	unsigned long start = head->vm_start, addr = head->vm_end;
	unsigned long flags;
	struct arm_vmregion *c, *new;

	if (head->vm_end - head->vm_start < size) {
		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
			__func__, size);
		goto out;
	}

	new = kmalloc(sizeof(struct arm_vmregion), gfp);
	if (!new)
		goto out;

	new->caller = caller;

	spin_lock_irqsave(&head->vm_lock, flags);

	addr = rounddown(addr - size, align);
	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
		if (addr >= c->vm_end)
			goto found;
		addr = rounddown(c->vm_start - size, align);
		if (addr < start)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry after the one we found.
	 */
	list_add(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&head->vm_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	kfree(new);
 out:
	return NULL;
}

static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	if (c)
		c->vm_active = 0;
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}

#ifdef CONFIG_PROC_FS
static int arm_vmregion_show(struct seq_file *m, void *p)
{
	struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);

	seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
		c->vm_end - c->vm_start);
	if (c->caller)
		seq_printf(m, " %pS", (void *)c->caller);
	seq_putc(m, '\n');
	return 0;
}

static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
{
	struct arm_vmregion_head *h = m->private;
	spin_lock_irq(&h->vm_lock);
	return seq_list_start(&h->vm_list, *pos);
}

static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct arm_vmregion_head *h = m->private;
	return seq_list_next(p, &h->vm_list, pos);
}

static void arm_vmregion_stop(struct seq_file *m, void *p)
{
	struct arm_vmregion_head *h = m->private;
	spin_unlock_irq(&h->vm_lock);
}

static const struct seq_operations arm_vmregion_ops = {
	.start	= arm_vmregion_start,
	.stop	= arm_vmregion_stop,
	.next	= arm_vmregion_next,
	.show	= arm_vmregion_show,
};

static int arm_vmregion_open(struct inode *inode, struct file *file)
{
	struct arm_vmregion_head *h = PDE(inode)->data;
	int ret = seq_open(file, &arm_vmregion_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = h;
	}
	return ret;
}

static const struct file_operations arm_vmregion_fops = {
	.open	= arm_vmregion_open,
	.read	= seq_read,
	.llseek	= seq_lseek,
	.release = seq_release,
};

int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
{
	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
	return 0;
}
#else
int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
{
	return 0;
}
#endif
arch/arm/mm/vmregion.h (deleted, 100644 → 0)

The whole file is removed by this commit; its previous contents were:

#ifndef VMREGION_H
#define VMREGION_H

#include <linux/spinlock.h>
#include <linux/list.h>

struct page;

struct arm_vmregion_head {
	spinlock_t		vm_lock;
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

struct arm_vmregion {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	int			vm_active;
	const void		*caller;
};

struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);

int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);

#endif