Commit 29ac878a
authored Nov 19, 2005 by Linus Torvalds
Merge master.kernel.org:/home/rmk/linux-2.6-arm
parents 54c4e6b5 a6c61e9d
Showing 15 changed files with 57 additions and 253 deletions (+57 / -253)
Documentation/arm/memory.txt            +3    -1
arch/arm/kernel/armksyms.c              +0    -1
arch/arm/kernel/entry-common.S          +1    -2
arch/arm/kernel/signal.c                +12   -13
arch/arm/kernel/vmlinux.lds.S           +5    -1
arch/arm/lib/getuser.S                  +0    -11
arch/arm/mm/Makefile                    +1    -1
arch/arm/mm/blockops.c                  +0    -185
arch/arm/mm/init.c                      +13   -11
arch/arm/mm/ioremap.c                   +1    -2
drivers/mtd/maps/ipaq-flash.c           +3    -3
drivers/mtd/nand/h1910.c                +1    -1
include/asm-arm/arch-ixp4xx/io.h        +3    -6
include/asm-arm/io.h                    +12   -9
include/asm-arm/uaccess.h               +2    -6
Documentation/arm/memory.txt

 Kernel Memory Layout on ARM Linux
 Russell King <rmk@arm.linux.org.uk>
-May 21, 2004 (2.6.6)
+November 17, 2005 (2.6.15)

 This document describes the virtual memory layout which the Linux
 kernel uses for ARM processors.  It indicates which regions are
...
@@ -37,6 +37,8 @@ ff000000	ffbfffff	Reserved for future expansion of DMA
 				mapping region.

 VMALLOC_END	feffffff	Free for platform use, recommended.
+				VMALLOC_END must be aligned to a 2MB
+				boundary.

 VMALLOC_START	VMALLOC_END-1	vmalloc() / ioremap() space.
 				Memory returned by vmalloc/ioremap will
...
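
The note added above is an alignment constraint on a platform-chosen constant. As a rough illustration only (the address below is hypothetical, not taken from this commit), a platform header could pick a 2MB-aligned VMALLOC_END and check it at build time:

/* Hypothetical platform definition, illustration only. */
#define VMALLOC_END	(0xe8000000UL)	/* multiple of 0x200000, i.e. 2MB aligned */

#if (VMALLOC_END & (0x00200000UL - 1)) != 0
#error "VMALLOC_END must be aligned to a 2MB boundary"
#endif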
arch/arm/kernel/armksyms.c
...
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(__arch_strncpy_from_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);

 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
...
arch/arm/kernel/entry-common.S
...
@@ -48,8 +48,7 @@ work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_notify_resume
-	disable_irq				@ disable interrupts
-	b	no_work_pending
+	b	ret_slow_syscall		@ Check work again

 work_resched:
 	bl	schedule
...
arch/arm/kernel/signal.c
...
@@ -595,23 +595,22 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 	 */
 	ret |= !valid_user_regs(regs);

-	/*
-	 * Block the signal if we were unsuccessful.
-	 */
 	if (ret != 0) {
-		spin_lock_irq(&tsk->sighand->siglock);
-		sigorsets(&tsk->blocked, &tsk->blocked,
-			  &ka->sa.sa_mask);
-		if (!(ka->sa.sa_flags & SA_NODEFER))
-			sigaddset(&tsk->blocked, sig);
-		recalc_sigpending();
-		spin_unlock_irq(&tsk->sighand->siglock);
+		force_sigsegv(sig, tsk);
+		return;
 	}

-	if (ret == 0)
-		return;
-
-	force_sigsegv(sig, tsk);
+	/*
+	 * Block the signal if we were successful.
+	 */
+	spin_lock_irq(&tsk->sighand->siglock);
+	sigorsets(&tsk->blocked, &tsk->blocked,
+		  &ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NODEFER))
+		sigaddset(&tsk->blocked, sig);
+	recalc_sigpending();
+	spin_unlock_irq(&tsk->sighand->siglock);
 }

 /*
...
arch/arm/kernel/vmlinux.lds.S
...
@@ -172,6 +172,10 @@ SECTIONS
 	.comment 0 : { *(.comment) }
 }

-/* those must never be empty */
+/*
+ * These must never be empty
+ * If you have to comment these two assert statements out, your
+ * binutils is too old (for other reasons as well)
+ */
 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
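
The two ASSERTs above fail the link early when the kernel was configured with no CPU support or no machine record. For orientation only, a machine record normally reaches the section bounded by __arch_info_begin/__arch_info_end through the MACHINE_START/MACHINE_END macros in a board file; the sketch below is a hypothetical, heavily trimmed board file, not code from this commit:

/* Hypothetical board file sketch, illustration only.  MACHINE_START expands
 * to a struct machine_desc placed in the arch info section, which is what
 * the "no machine record defined" ASSERT above checks is non-empty. */
#include <asm/mach/arch.h>

static void __init myboard_map_io(void)
{
	/* static device mappings would be set up here */
}

MACHINE_START(MYBOARD, "Hypothetical board")
	/* other machine_desc fields omitted in this sketch */
	.map_io		= myboard_map_io,
MACHINE_END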
arch/arm/lib/getuser.S
...
@@ -54,15 +54,6 @@ __get_user_4:
 	mov	r0, #0
 	mov	pc, lr

-	.global	__get_user_8
-__get_user_8:
-5:	ldrt	r2, [r0], #4
-6:	ldrt	r3, [r0]
-	mov	r0, #0
-	mov	pc, lr
-
-__get_user_bad_8:
-	mov	r3, #0
 __get_user_bad:
 	mov	r2, #0
 	mov	r0, #-EFAULT
...
@@ -73,6 +64,4 @@ __get_user_bad:
 	.long	2b, __get_user_bad
 	.long	3b, __get_user_bad
 	.long	4b, __get_user_bad
-	.long	5b, __get_user_bad_8
-	.long	6b, __get_user_bad_8
 	.previous
arch/arm/mm/Makefile
...
@@ -51,4 +51,4 @@ obj-$(CONFIG_CPU_ARM1026)	+= proc-arm1026.o
 obj-$(CONFIG_CPU_SA110)		+= proc-sa110.o
 obj-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o
 obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
-obj-$(CONFIG_CPU_V6)		+= proc-v6.o blockops.o
+obj-$(CONFIG_CPU_V6)		+= proc-v6.o
arch/arm/mm/blockops.c
deleted 100644 → 0 (entire file removed; its former contents follow)

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

extern struct cpu_cache_fns blk_cache_fns;

#define HARVARD_CACHE

/*
 * blk_flush_kern_dcache_page(kaddr)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - kaddr - kernel address (guaranteed to be page aligned)
 */
static void __attribute__((naked))
blk_flush_kern_dcache_page(void *kaddr)
{
	asm(
	"add	r1, r0, %0					\n\
	sub	r1, r1, %1					\n\
1:	.word	0xec401f0e	@ mcrr	p15, 0, r0, r1, c14, 0	@ blocking	\n\
	mov	r0, #0						\n\
	mcr	p15, 0, r0, c7, c5, 0				\n\
	mcr	p15, 0, r0, c7, c10, 4				\n\
	mov	pc, lr"
	:
	: "I" (PAGE_SIZE), "I" (L1_CACHE_BYTES));
}

/*
 * blk_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_inv_range_unified(unsigned long start, unsigned long end)
{
	asm(
	"tst	r0, %0						\n\
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line	\n\
	tst	r1, %0						\n\
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line	\n\
	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
	mov	r0, #0						\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr"
	:
	: "I" (L1_CACHE_BYTES - 1));
}

static void __attribute__((naked))
blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
{
	asm(
	"tst	r0, %0						\n\
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line		\n\
	tst	r1, %0						\n\
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line	\n\
	.word	0xec401f06	@ mcrr	p15, 0, r1, r0, c6, 0	@ blocking	\n\
	mov	r0, #0						\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr"
	:
	: "I" (L1_CACHE_BYTES - 1));
}

/*
 * blk_dma_clean_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_clean_range(unsigned long start, unsigned long end)
{
	asm(
	".word	0xec401f0c	@ mcrr	p15, 0, r1, r0, c12, 0	@ blocking	\n\
	mov	r0, #0						\n\
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer	\n\
	mov	pc, lr");
}

/*
 * blk_dma_flush_range(start,end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
static void __attribute__((naked))
blk_dma_flush_range(unsigned long start, unsigned long end)
{
	asm(
	".word	0xec401f0e	@ mcrr	p15, 0, r1, r0, c14, 0	@ blocking	\n\
	mov	pc, lr");
}

static int blockops_trap(struct pt_regs *regs, unsigned int instr)
{
	regs->ARM_r4 |= regs->ARM_r2;
	regs->ARM_pc += 4;
	return 0;
}

static char *func[] = {
	"Prefetch data range",
	"Clean+Invalidate data range",
	"Clean data range",
	"Invalidate data range",
	"Invalidate instr range"
};

static struct undef_hook blockops_hook __initdata = {
	.instr_mask	= 0x0fffffd0,
	.instr_val	= 0x0c401f00,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= blockops_trap,
};

static int __init blockops_check(void)
{
	register unsigned int err asm("r4") = 0;
	unsigned int err_pos = 1;
	unsigned int cache_type;
	int i;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));

	printk("Checking V6 block cache operations:\n");
	register_undef_hook(&blockops_hook);

	__asm__ ("mov	r0, %0\n\t"
		"mov	r1, %1\n\t"
		"mov	r2, #1\n\t"
		".word	0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
		"mov	r2, #2\n\t"
		".word	0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
		"mov	r2, #4\n\t"
		".word	0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
		"mov	r2, #8\n\t"
		".word	0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
		"mov	r2, #16\n\t"
		".word	0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
		:
		: "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
		: "r0", "r1", "r2");

	unregister_undef_hook(&blockops_hook);

	for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
		printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");

	if ((err & 8) == 0) {
		printk(" --> Using %s block cache invalidate\n",
		       cache_type & (1 << 24) ? "harvard" : "unified");
		if (cache_type & (1 << 24))
			cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
		else
			cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
	}
	if ((err & 4) == 0) {
		printk(" --> Using block cache clean\n");
		cpu_cache.dma_clean_range        = blk_dma_clean_range;
	}
	if ((err & 2) == 0) {
		printk(" --> Using block cache clean+invalidate\n");
		cpu_cache.dma_flush_range        = blk_dma_flush_range;
		cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
	}

	return 0;
}

__initcall(blockops_check);
arch/arm/mm/init.c
...
@@ -420,7 +420,8 @@ static void __init bootmem_init(struct meminfo *mi)
  * Set up device the mappings.  Since we clear out the page tables for all
  * mappings above VMALLOC_END, we will remove any debug device mappings.
  * This means you have to be careful how you debug this function, or any
- * called function.  (Do it by code inspection!)
+ * called function.  This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
  */
 static void __init devicemaps_init(struct machine_desc *mdesc)
 {
...
@@ -428,6 +429,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	unsigned long addr;
 	void *vectors;

+	/*
+	 * Allocate the vector page early.
+	 */
+	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	BUG_ON(!vectors);
+
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
...
@@ -461,12 +468,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	create_mapping(&map);
 #endif

-	flush_cache_all();
-	local_flush_tlb_all();
-
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-	BUG_ON(!vectors);
-
 	/*
 	 * Create a mapping for the machine vectors at the high-vectors
 	 * location (0xffff0000).  If we aren't using high-vectors, also
...
@@ -491,12 +492,13 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	mdesc->map_io();

 	/*
-	 * Finally flush the tlb again - this ensures that we're in a
-	 * consistent state wrt the writebuffer if the writebuffer needs
-	 * draining.  After this point, we can start to touch devices
-	 * again.
+	 * Finally flush the caches and tlb to ensure that we're in a
+	 * consistent state wrt the writebuffer.  This also ensures that
+	 * any write-allocated cache lines in the vector page are written
+	 * back.  After this point, we can start to touch devices again.
 	 */
 	local_flush_tlb_all();
+	flush_cache_all();
 }

 /*
...
arch/arm/mm/ioremap.c
...
@@ -130,8 +130,7 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
  * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
  */
 void __iomem *
-__ioremap(unsigned long phys_addr, size_t size, unsigned long flags,
-	  unsigned long align)
+__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
 {
 	void * addr;
 	struct vm_struct * area;
...
drivers/mtd/maps/ipaq-flash.c
...
@@ -246,7 +246,7 @@ int __init ipaq_mtd_init(void)
 		ipaq_map[i].size = h3xxx_max_flash_size;
 		ipaq_map[i].set_vpp = h3xxx_set_vpp;
 		ipaq_map[i].phys = cs_phys[i];
-		ipaq_map[i].virt = __ioremap(cs_phys[i], 0x04000000, 0, 1);
+		ipaq_map[i].virt = ioremap(cs_phys[i], 0x04000000);
 		if (machine_is_h3100 () || machine_is_h1900())
 			ipaq_map[i].bankwidth = 2;
 	}
...
@@ -280,7 +280,7 @@ int __init ipaq_mtd_init(void)
 		nb_parts = ARRAY_SIZE(jornada_partitions);
 		ipaq_map[0].size = jornada_max_flash_size;
 		ipaq_map[0].set_vpp = jornada56x_set_vpp;
-		ipaq_map[0].virt = (__u32)__ioremap(0x0, 0x04000000, 0, 1);
+		ipaq_map[0].virt = (__u32)ioremap(0x0, 0x04000000);
 	}
 #endif
 #ifdef CONFIG_SA1100_JORNADA720
...
@@ -442,7 +442,7 @@ static int __init h1900_special_case(void)
 	ipaq_map[0].size = 0x80000;
 	ipaq_map[0].set_vpp = h3xxx_set_vpp;
 	ipaq_map[0].phys = 0x0;
-	ipaq_map[0].virt = __ioremap(0x0, 0x04000000, 0, 1);
+	ipaq_map[0].virt = ioremap(0x0, 0x04000000);
 	ipaq_map[0].bankwidth = 2;
 	printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
...
drivers/mtd/nand/h1910.c
...
@@ -112,7 +112,7 @@ static int __init h1910_init (void)
 	if (!machine_is_h1900())
 		return -ENODEV;

-	nandaddr = __ioremap(0x08000000, 0x1000, 0, 1);
+	nandaddr = ioremap(0x08000000, 0x1000);
 	if (!nandaddr) {
 		printk("Failed to ioremap nand flash.\n");
 		return -ENOMEM;
...
include/asm-arm/arch-ixp4xx/io.h
...
@@ -59,11 +59,10 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
  * fallback to the default.
  */
 static inline void __iomem *
-__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned long align)
+__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags)
 {
-	extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
 	if((addr < 0x48000000) || (addr > 0x4fffffff))
-		return __ioremap(addr, size, flags, align);
+		return __ioremap(addr, size, flags);

 	return (void *)addr;
 }
...
@@ -71,13 +70,11 @@ __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned
 static inline void
 __ixp4xx_iounmap(void __iomem *addr)
 {
-	extern void __iounmap(void __iomem *addr);
-
 	if ((u32)addr >= VMALLOC_START)
 		__iounmap(addr);
 }

-#define __arch_ioremap(a, s, f, x)	__ixp4xx_ioremap(a, s, f, x)
+#define __arch_ioremap(a, s, f)		__ixp4xx_ioremap(a, s, f)
 #define	__arch_iounmap(a)		__ixp4xx_iounmap(a)

 #define	writeb(v, p)			__ixp4xx_writeb(v, p)
...
include/asm-arm/io.h
...
@@ -54,6 +54,12 @@ extern void __raw_readsl(void __iomem *addr, void *data, int longlen);
 #define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
 #define __raw_readl(a)		(__chk_io_ptr(a), *(volatile unsigned int __force *)(a))

+/*
+ * Architecture ioremap implementation.
+ */
+extern void __iomem * __ioremap(unsigned long, size_t, unsigned long);
+extern void __iounmap(void __iomem *addr);
+
 /*
  * Bad read/write accesses...
  */
...
@@ -256,18 +262,15 @@ check_signature(void __iomem *io_addr, const unsigned char *signature,
  * ioremap takes a PCI memory address, as specified in
  * Documentation/IO-mapping.txt.
  */
-extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
-extern void __iounmap(void __iomem *addr);
-
 #ifndef __arch_ioremap
-#define ioremap(cookie,size)		__ioremap(cookie,size,0,1)
-#define ioremap_nocache(cookie,size)	__ioremap(cookie,size,0,1)
-#define ioremap_cached(cookie,size)	__ioremap(cookie,size,L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size)		__ioremap(cookie,size,0)
+#define ioremap_nocache(cookie,size)	__ioremap(cookie,size,0)
+#define ioremap_cached(cookie,size)	__ioremap(cookie,size,L_PTE_CACHEABLE)
 #define iounmap(cookie)			__iounmap(cookie)
 #else
-#define ioremap(cookie,size)		__arch_ioremap((cookie),(size),0,1)
-#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie),(size),0,1)
-#define ioremap_cached(cookie,size)	__arch_ioremap((cookie),(size),L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size)		__arch_ioremap((cookie),(size),0)
+#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie),(size),0)
+#define ioremap_cached(cookie,size)	__arch_ioremap((cookie),(size),L_PTE_CACHEABLE)
 #define iounmap(cookie)			__arch_iounmap(cookie)
 #endif
...
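
Taken together with the arch/arm/mm/ioremap.c change above, this drops the old fourth 'align' argument from __ioremap and routes everything through the three-argument form, so drivers simply call ioremap()/iounmap(). A minimal caller sketch under assumed names (IO_PHYS_BASE, IO_WIN_SIZE and the functions below are illustrative, not from this commit):

/* Illustration only: migrating a caller to the 3-argument mapping API. */
#include <linux/errno.h>
#include <asm/io.h>

#define IO_PHYS_BASE	0x40000000UL	/* hypothetical device window */
#define IO_WIN_SIZE	0x1000

static void __iomem *regs;

static int example_map(void)
{
	/* before this commit: regs = __ioremap(IO_PHYS_BASE, IO_WIN_SIZE, 0, 1); */
	regs = ioremap(IO_PHYS_BASE, IO_WIN_SIZE);	/* now expands to __ioremap(cookie, size, 0) */
	if (!regs)
		return -ENOMEM;
	return 0;
}

static void example_unmap(void)
{
	iounmap(regs);
}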
include/asm-arm/uaccess.h
...
@@ -100,7 +100,6 @@ static inline void set_fs (mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_8(void *);
 extern int __get_user_bad(void);

 #define __get_user_x(__r2,__p,__e,__s,__i...)				\
...
@@ -114,7 +113,7 @@ extern int __get_user_bad(void);
 #define get_user(x,p)							\
 	({								\
 		const register typeof(*(p)) __user *__p asm("r0") = (p);\
-		register typeof(*(p)) __r2 asm("r2");			\
+		register unsigned int __r2 asm("r2");			\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
...
@@ -126,12 +125,9 @@ extern int __get_user_bad(void);
 		case 4:							\
 			__get_user_x(__r2, __p, __e, 4, "lr");		\
 			break;						\
-		case 8:							\
-			__get_user_x(__r2, __p, __e, 8, "lr");		\
-			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
-		x = __r2;						\
+		x = (typeof(*(p))) __r2;				\
 		__e;							\
 	})
...
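
With the 64-bit case gone, every remaining get_user() size fits a single 32-bit register, so the temporary that shadows r2 is now a plain unsigned int and is converted back to the pointee's type only in the final assignment. A rough sketch of that shape (the __xfer helper is made up for illustration; the real per-size helpers live in arch/arm/lib/getuser.S):

/* Illustration only: the structure of the updated macro, not kernel code. */
#define get_user_sketch(x, p)						\
({									\
	unsigned int __raw = 0;		/* raw value, was typeof(*(p)) */ \
	int __err = __xfer(&__raw, (p), sizeof(*(p)));	/* hypothetical */ \
	(x) = (typeof(*(p)))__raw;	/* single cast back to the target type */ \
	__err;								\
})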