Commit cf64c2a9 authored Feb 26, 2021 by David S. Miller
Merge branch 'work.sparc32' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Parents: b9d62433 73686e78
Showing 14 changed files with 193 additions and 526 deletions.
arch/sparc/include/asm/elf_64.h       +0    -1
arch/sparc/include/asm/extable.h      +2    -2
arch/sparc/include/asm/uaccess.h      +3    -0
arch/sparc/include/asm/uaccess_32.h   +0    -38
arch/sparc/include/asm/uaccess_64.h   +0    -1
arch/sparc/kernel/unaligned_32.c      +5    -5
arch/sparc/lib/checksum_32.S          +27   -37
arch/sparc/lib/copy_user.S            +112  -203
arch/sparc/lib/memset.S               +33   -54
arch/sparc/mm/Makefile                +1    -1
arch/sparc/mm/extable.c               +0    -107
arch/sparc/mm/fault_32.c              +10   -70
arch/sparc/mm/mm_32.h                 +0    -2
lib/extable.c                         +0    -5
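The common thread through these fourteen files: sparc32 stops using its private exception-table format (range entries, search_extables_range(), the fault index delivered in %g2) and switches to the generic kernel format of one {insn, fixup} pair per potentially faulting instruction, looked up via search_exception_tables(). For orientation, below is a minimal user-space C model of the two lookup schemes; it is a sketch with mock addresses, not kernel code (the real implementations appear in the arch/sparc/mm/extable.c and lib/extable.c diffs further down).

    /* Minimal user-space model of the two extable lookup schemes this merge
     * swaps -- a sketch for orientation only, with mock addresses. */
    #include <stddef.h>
    #include <stdio.h>

    struct exception_table_entry { unsigned long insn, fixup; };

    /* Old sparc32 scheme: fixup == 0 marks a range entry; the following
     * entry holds (last insn + 4) and the real fixup address.  The handler
     * is also told the index of the faulting insn within the range (what
     * the kernel delivered in %g2). */
    static unsigned long old_search(const struct exception_table_entry *base,
                                    size_t num, unsigned long addr,
                                    unsigned long *g2)
    {
            for (size_t i = 0; i < num; i++) {
                    if (base[i].fixup == 0 && i + 1 < num) {   /* range entry */
                            if (addr >= base[i].insn && addr < base[i + 1].insn) {
                                    *g2 = (addr - base[i].insn) / 4;
                                    return base[i + 1].fixup;
                            }
                            i++;                     /* skip the second half */
                    } else if (base[i].insn == addr) {         /* single entry */
                            return base[i].fixup;
                    }
            }
            return 0;
    }

    /* New (generic) scheme: one {insn, fixup} pair per faulting insn, so a
     * plain search suffices and %g2 is no longer involved. */
    static unsigned long new_search(const struct exception_table_entry *base,
                                    size_t num, unsigned long addr)
    {
            for (size_t i = 0; i < num; i++)
                    if (base[i].insn == addr)
                            return base[i].fixup;
            return 0;
    }

    int main(void)
    {
            /* One 4-insn range at 0x1000 with fixup 0x2000, old style... */
            struct exception_table_entry old[] = { {0x1000, 0}, {0x1010, 0x2000} };
            /* ...and the same coverage, new style: one entry per insn. */
            struct exception_table_entry gen[] = {
                    {0x1000, 0x2000}, {0x1004, 0x2000},
                    {0x1008, 0x2000}, {0x100c, 0x2000},
            };
            unsigned long g2 = 0;

            printf("old: fixup=%#lx g2=%lu\n", old_search(old, 2, 0x1008, &g2), g2);
            printf("new: fixup=%#lx\n", new_search(gen, 4, 0x1008));
            return 0;       /* old: fixup=0x2000 g2=2; new: fixup=0x2000 */
    }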
arch/sparc/include/asm/elf_64.h

@@ -8,7 +8,6 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 #include <asm/adi.h>
arch/sparc/include/asm/extable_64.h → arch/sparc/include/asm/extable.h

 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_EXTABLE64_H
-#define __ASM_EXTABLE64_H
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
arch/sparc/include/asm/uaccess.h

 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+
+#include <asm/extable.h>
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else
arch/sparc/include/asm/uaccess_32.h

@@ -13,9 +13,6 @@
 #include <asm/processor.h>

-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
 /* Sparc is not segmented, however we need to be able to fool access_ok()
  * when doing system calls from kernel mode legitimately.
  *
@@ -40,36 +37,6 @@
 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
 #define access_ok(addr, size) __access_ok((unsigned long)(addr), size)

-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with no intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
- */
-
-struct exception_table_entry
-{
-        unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise.  */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
  * pointer type..
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
        unsigned long ret;

        __asm__ __volatile__ (
-               ".section __ex_table,#alloc\n\t"
-               ".align 4\n\t"
-               ".word 1f,3\n\t"
-               ".previous\n\t"
                "mov %2, %%o1\n"
                "1:\n\t"
                "call __bzero\n\t"
                " mov %1, %%o0\n\t"
                "mov %%o0, %0\n"
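Worth noting in the removed __clear_user() asm: its __ex_table entry was ".word 1f,3", i.e. the literal 3 in the fixup slot rather than an address. Such small values were the "values below 10 ... reserved for other things" that the (also removed) lookup_fault() in arch/sparc/mm/fault_32.c decoded as commands ("load & store will be handled by fixup") instead of jump targets. A hedged C sketch of that old dispatch; classify_fixup and the FIXUP_* names are made up for illustration and are not kernel identifiers:

    /* Sketch of how the old sparc32 fault path told real fixup addresses
     * apart from reserved command codes (cf. the removed lookup_fault()
     * in arch/sparc/mm/fault_32.c).  Illustrative names only. */
    enum old_fixup_kind { FIXUP_NONE, FIXUP_CODE, FIXUP_ADDRESS };

    static enum old_fixup_kind classify_fixup(unsigned long fixup)
    {
            if (fixup == 0)
                    return FIXUP_NONE;      /* no entry found */
            if (fixup < 10)
                    return FIXUP_CODE;      /* e.g. 3: load & store handled
                                             * by the caller's fixup */
            return FIXUP_ADDRESS;           /* jump target: regs->pc = fixup */
    }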
arch/sparc/include/asm/uaccess_64.h

@@ -10,7 +10,6 @@
 #include <linux/string.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm/extable_64.h>
 #include <asm/processor.h>
arch/sparc/kernel/unaligned_32.c

@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/extable.h>

 #include <asm/setup.h>
@@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)
 static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
-        unsigned long g2 = regs->u_regs[UREG_G2];
-        unsigned long fixup = search_extables_range(regs->pc, &g2);
+        const struct exception_table_entry *entry;

-        if (!fixup) {
+        entry = search_exception_tables(regs->pc);
+        if (!entry) {
                 unsigned long address = compute_effective_address(regs, insn);
                 if (address < PAGE_SIZE) {
                         printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
@@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
                 die_if_kernel("Oops", regs);
                 /* Not reached */
         }
-        regs->pc = fixup;
+        regs->pc = entry->fixup;
         regs->npc = regs->pc + 4;
-        regs->u_regs[UREG_G2] = g2;
 }

 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
arch/sparc/lib/checksum_32.S

@@ -155,13 +155,6 @@ cpout: retl ! get outta here
         .text;                          \
         .align  4

-#define EXT(start,end)                  \
-        .section __ex_table,ALLOC;      \
-        .align  4;                      \
-        .word   start, 0, end, cc_fault; \
-        .text;                          \
-        .align  4
-
        /* This aligned version executes typically in 8.5 superscalar cycles, this
         * is the best I can do.  I say 8.5 because the final add will pair with
         * the next ldd in the main unrolled loop.  Thus the pipe is always full.
@@ -169,20 +162,20 @@ cpout: retl ! get outta here
         * please check the fixup code below as well.
         */
 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
-        ldd     [src + off + 0x00], t0;         \
-        ldd     [src + off + 0x08], t2;         \
+        EX(ldd  [src + off + 0x00], t0);        \
+        EX(ldd  [src + off + 0x08], t2);        \
         addxcc  t0, sum, sum;                   \
-        ldd     [src + off + 0x10], t4;         \
+        EX(ldd  [src + off + 0x10], t4);        \
         addxcc  t1, sum, sum;                   \
-        ldd     [src + off + 0x18], t6;         \
+        EX(ldd  [src + off + 0x18], t6);        \
         addxcc  t2, sum, sum;                   \
-        std     t0, [dst + off + 0x00];         \
+        EX(std  t0, [dst + off + 0x00]);        \
         addxcc  t3, sum, sum;                   \
-        std     t2, [dst + off + 0x08];         \
+        EX(std  t2, [dst + off + 0x08]);        \
         addxcc  t4, sum, sum;                   \
-        std     t4, [dst + off + 0x10];         \
+        EX(std  t4, [dst + off + 0x10]);        \
         addxcc  t5, sum, sum;                   \
-        std     t6, [dst + off + 0x18];         \
+        EX(std  t6, [dst + off + 0x18]);        \
         addxcc  t6, sum, sum;                   \
         addxcc  t7, sum, sum;
@@ -191,39 +184,39 @@ cpout: retl ! get outta here
         * Viking MXCC into streaming mode.  Ho hum...
         */
 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
-        ldd     [src + off + 0x00], t0;         \
-        ldd     [src + off + 0x08], t2;         \
-        ldd     [src + off + 0x10], t4;         \
-        ldd     [src + off + 0x18], t6;         \
-        st      t0, [dst + off + 0x00];         \
+        EX(ldd  [src + off + 0x00], t0);        \
+        EX(ldd  [src + off + 0x08], t2);        \
+        EX(ldd  [src + off + 0x10], t4);        \
+        EX(ldd  [src + off + 0x18], t6);        \
+        EX(st   t0, [dst + off + 0x00]);        \
         addxcc  t0, sum, sum;                   \
-        st      t1, [dst + off + 0x04];         \
+        EX(st   t1, [dst + off + 0x04]);        \
         addxcc  t1, sum, sum;                   \
-        st      t2, [dst + off + 0x08];         \
+        EX(st   t2, [dst + off + 0x08]);        \
         addxcc  t2, sum, sum;                   \
-        st      t3, [dst + off + 0x0c];         \
+        EX(st   t3, [dst + off + 0x0c]);        \
         addxcc  t3, sum, sum;                   \
-        st      t4, [dst + off + 0x10];         \
+        EX(st   t4, [dst + off + 0x10]);        \
         addxcc  t4, sum, sum;                   \
-        st      t5, [dst + off + 0x14];         \
+        EX(st   t5, [dst + off + 0x14]);        \
         addxcc  t5, sum, sum;                   \
-        st      t6, [dst + off + 0x18];         \
+        EX(st   t6, [dst + off + 0x18]);        \
         addxcc  t6, sum, sum;                   \
-        st      t7, [dst + off + 0x1c];         \
+        EX(st   t7, [dst + off + 0x1c]);        \
         addxcc  t7, sum, sum;

        /* Yuck, 6 superscalar cycles... */
 #define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
-        ldd     [src - off - 0x08], t0;         \
-        ldd     [src - off - 0x00], t2;         \
+        EX(ldd  [src - off - 0x08], t0);        \
+        EX(ldd  [src - off - 0x00], t2);        \
         addxcc  t0, sum, sum;                   \
-        st      t0, [dst - off - 0x08];         \
+        EX(st   t0, [dst - off - 0x08]);        \
         addxcc  t1, sum, sum;                   \
-        st      t1, [dst - off - 0x04];         \
+        EX(st   t1, [dst - off - 0x04]);        \
         addxcc  t2, sum, sum;                   \
-        st      t2, [dst - off - 0x00];         \
+        EX(st   t2, [dst - off - 0x00]);        \
         addxcc  t3, sum, sum;                   \
-        st      t3, [dst - off + 0x04];
+        EX(st   t3, [dst - off + 0x04]);

        /* Handle the end cruft code out of band for better cache patterns. */
 cc_end_cruft:
@@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
         CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
         CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
         CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10:     EXT(5b, 10b)                    ! note for exception handling
-        sub     %g1, 128, %g1           ! detract from length
+10:     sub     %g1, 128, %g1           ! detract from length
         addx    %g0, %g7, %g7           ! add in last carry bit
         andcc   %g1, 0xffffff80, %g0    ! more to csum?
@@ -356,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
         CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
         CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
         CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12:     EXT(cctbl, 12b)                 ! note for exception table handling
-        addx    %g0, %g7, %g7
+12:     addx    %g0, %g7, %g7
         andcc   %o3, 0xf, %g0           ! check for low bits set
 ccte:   bne     cc_end_cruft            ! something left, handle it out of band
          andcc  %o3, 8, %g0             ! begin checks for that code
@@ -367,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
         CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
         CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
         CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11:     EXT(ccdbl, 11b)                 ! note for exception table handling
-        sub     %g1, 128, %g1           ! detract from length
+11:     sub     %g1, 128, %g1           ! detract from length
         addx    %g0, %g7, %g7           ! add in last carry bit
         andcc   %g1, 0xffffff80, %g0    ! more to csum?
arch/sparc/lib/copy_user.S

(This diff is collapsed.)
arch/sparc/lib/memset.S

@@ -19,7 +19,7 @@
 98:     x,y;                                    \
         .section .fixup,ALLOC,EXECINSTR;       \
         .align  4;                              \
-99:     ba      30f;                            \
+99:     retl;                                   \
          a, b, %o0;                             \
         .section __ex_table,ALLOC;              \
         .align  4;                              \
@@ -27,35 +27,44 @@
         .text;                                  \
         .align  4

-#define EXT(start,end,handler)                  \
+#define STORE(source, base, offset, n)          \
+98:     std     source, [base + offset + n];    \
+        .section .fixup,ALLOC,EXECINSTR;        \
+        .align  4;                              \
+99:     ba      30f;                            \
+         sub    %o3, n - offset, %o3;           \
         .section __ex_table,ALLOC;              \
         .align  4;                              \
-        .word   start, 0, end, handler;         \
+        .word   98b, 99b;                       \
         .text;                                  \
-        .align  4
+        .align  4;
+
+#define STORE_LAST(source, base, offset, n)     \
+        EX(std  source, [base - offset - n],    \
+           add  %o1, offset + n);

 /* Please don't change these macros, unless you change the logic
  * in the .fixup section below as well.
  * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
 #define ZERO_BIG_BLOCK(base, offset, source)    \
-        std     source, [base + offset + 0x00]; \
-        std     source, [base + offset + 0x08]; \
-        std     source, [base + offset + 0x10]; \
-        std     source, [base + offset + 0x18]; \
-        std     source, [base + offset + 0x20]; \
-        std     source, [base + offset + 0x28]; \
-        std     source, [base + offset + 0x30]; \
-        std     source, [base + offset + 0x38];
+        STORE(source, base, offset, 0x00);      \
+        STORE(source, base, offset, 0x08);      \
+        STORE(source, base, offset, 0x10);      \
+        STORE(source, base, offset, 0x18);      \
+        STORE(source, base, offset, 0x20);      \
+        STORE(source, base, offset, 0x28);      \
+        STORE(source, base, offset, 0x30);      \
+        STORE(source, base, offset, 0x38);

 #define ZERO_LAST_BLOCKS(base, offset, source)  \
-        std     source, [base - offset - 0x38]; \
-        std     source, [base - offset - 0x30]; \
-        std     source, [base - offset - 0x28]; \
-        std     source, [base - offset - 0x20]; \
-        std     source, [base - offset - 0x18]; \
-        std     source, [base - offset - 0x10]; \
-        std     source, [base - offset - 0x08]; \
-        std     source, [base - offset - 0x00];
+        STORE_LAST(source, base, offset, 0x38); \
+        STORE_LAST(source, base, offset, 0x30); \
+        STORE_LAST(source, base, offset, 0x28); \
+        STORE_LAST(source, base, offset, 0x20); \
+        STORE_LAST(source, base, offset, 0x18); \
+        STORE_LAST(source, base, offset, 0x10); \
+        STORE_LAST(source, base, offset, 0x08); \
+        STORE_LAST(source, base, offset, 0x00);

         .text
         .align  4
@@ -68,8 +77,6 @@ __bzero_begin:
         .globl  memset
         EXPORT_SYMBOL(__bzero)
         EXPORT_SYMBOL(memset)
-        .globl  __memset_start, __memset_end
-__memset_start:
 memset:
         mov     %o0, %g1
         mov     1, %g4
@@ -122,8 +129,6 @@ __bzero:
         ZERO_BIG_BLOCK(%o0, 0x00, %g2)
         subcc   %o3, 128, %o3
         ZERO_BIG_BLOCK(%o0, 0x40, %g2)
-11:
-        EXT(10b, 11b, 20f)
         bne     10b
          add    %o0, 128, %o0
@@ -138,11 +143,9 @@ __bzero:
         jmp     %o4
          add    %o0, %o2, %o0

 12:
         ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
         ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
-13:
-        EXT(12b, 13b, 21f)
         be      8f
          andcc  %o1, 4, %g0
@@ -182,37 +185,13 @@ __bzero:
 5:
         retl
          clr    %o0
-__memset_end:

         .section .fixup,#alloc,#execinstr
         .align  4
-20:
-        cmp     %g2, 8
-        bleu    1f
+30:
          and    %o1, 0x7f, %o1
-        sub     %g2, 9, %g2
-        add     %o3, 64, %o3
-1:
-        sll     %g2, 3, %g2
+        retl
          add    %o3, %o1, %o0
-        b       30f
-         sub    %o0, %g2, %o0
-
-21:
-        mov     8, %o0
-        and     %o1, 7, %o1
-        sub     %o0, %g2, %o0
-        sll     %o0, 3, %o0
-        b       30f
-         add    %o0, %o1, %o0
-
-30:
-/* %o4 is faulting address, %o5 is %pc where fault occurred */
-        save    %sp, -104, %sp
-        mov     %i5, %o0
-        mov     %i7, %o1
-        call    lookup_fault
-         mov    %i4, %o2
-        ret
-        restore

         .globl __bzero_end
 __bzero_end:
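With the EXT() range entries gone, every STORE() in memset.S now carries its own fixup: the delay-slot "sub %o3, n - offset, %o3" re-expresses how many bytes of the current 128-byte loop iteration are still owed, and the shared tail at 30: folds back in the low seven bits of the requested length, so __bzero reports the number of bytes it did not clear. Below is a rough C model of that arithmetic, under the assumption that the registers have the roles named in the comments; it is a sketch, not kernel code:

    /* Rough model of the return value computed by the new memset.S fixup.
     * Assumed register roles: o1 = requested length, o3 = bytes still owed
     * to the 128-byte loop when the fault hits, offset/n = arguments of
     * the faulting STORE(). */
    static unsigned long bytes_not_cleared(unsigned long o3, unsigned long o1,
                                           unsigned long offset, unsigned long n)
    {
            /* STORE()'s fixup, executed in the branch delay slot:
             *   sub %o3, n - offset, %o3
             * For the second 64-byte half (offset = 0x40), %o3 has already
             * been decremented by 128, so subtracting the negative
             * (n - offset) adds the missing 64 back in. */
            o3 -= n - offset;

            /* Common tail at 30:
             *   and %o1, 0x7f, %o1 ; retl ; add %o3, %o1, %o0 */
            return o3 + (o1 & 0x7f);
    }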
arch/sparc/mm/Makefile

@@ -8,7 +8,7 @@ ccflags-y := -Werror
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
-obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += srmmu_access.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC32)   += leon_mm.o
arch/sparc/mm/extable.c (deleted, 100644 → 0)

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/sparc/mm/extable.c
 */

#include <linux/module.h>
#include <linux/extable.h>
#include <linux/uaccess.h>

void sort_extable(struct exception_table_entry *start,
                  struct exception_table_entry *finish)
{
}

/* Caller knows they are in a range if ret->fixup == 0 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *base,
               const size_t num,
               unsigned long value)
{
        int i;

        /* Single insn entries are encoded as:
         * word 1: insn address
         * word 2: fixup code address
         *
         * Range entries are encoded as:
         * word 1: first insn address
         * word 2: 0
         * word 3: last insn address + 4 bytes
         * word 4: fixup code address
         *
         * Deleted entries are encoded as:
         * word 1: unused
         * word 2: -1
         *
         * See asm/uaccess.h for more details.
         */

        /* 1. Try to find an exact match. */
        for (i = 0; i < num; i++) {
                if (base[i].fixup == 0) {
                        /* A range entry, skip both parts. */
                        i++;
                        continue;
                }

                /* A deleted entry; see trim_init_extable */
                if (base[i].fixup == -1)
                        continue;

                if (base[i].insn == value)
                        return &base[i];
        }

        /* 2. Try to find a range match. */
        for (i = 0; i < (num - 1); i++) {
                if (base[i].fixup)
                        continue;

                if (base[i].insn <= value && base[i + 1].insn > value)
                        return &base[i];

                i++;
        }

        return NULL;
}

#ifdef CONFIG_MODULES
/* We could memmove them around; easier to mark the trimmed ones. */
void trim_init_extable(struct module *m)
{
        unsigned int i;
        bool range;

        for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
                range = m->extable[i].fixup == 0;

                if (within_module_init(m->extable[i].insn, m)) {
                        m->extable[i].fixup = -1;
                        if (range)
                                m->extable[i + 1].fixup = -1;
                }
                if (range)
                        i++;
        }
}
#endif /* CONFIG_MODULES */

/* Special extable search, which handles ranges.  Returns fixup */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
{
        const struct exception_table_entry *entry;

        entry = search_exception_tables(addr);
        if (!entry)
                return 0;

        /* Inside range?  Fix g2 and return correct fixup */
        if (!entry->fixup) {
                *g2 = (addr - entry->insn) / 4;
                return (entry + 1)->fixup;
        }

        return entry->fixup;
}
arch/sparc/mm/fault_32.c

@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
+#include <linux/extable.h>

 #include <asm/page.h>
 #include <asm/openprom.h>
@@ -54,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address,
         die_if_kernel("Oops", regs);
 }

-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-                            unsigned long address)
-{
-        struct pt_regs regs;
-        unsigned long g2;
-        unsigned int insn;
-        int i;
-
-        i = search_extables_range(ret_pc, &g2);
-        switch (i) {
-        case 3:
-                /* load & store will be handled by fixup */
-                return 3;
-
-        case 1:
-                /* store will be handled by fixup, load will bump out */
-                /* for _to_ macros */
-                insn = *((unsigned int *) pc);
-                if ((insn >> 21) & 1)
-                        return 1;
-                break;
-
-        case 2:
-                /* load will be handled by fixup, store will bump out */
-                /* for _from_ macros */
-                insn = *((unsigned int *) pc);
-                if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
-                        return 2;
-                break;
-
-        default:
-                break;
-        }
-
-        memset(&regs, 0, sizeof(regs));
-        regs.pc = pc;
-        regs.npc = pc + 4;
-        __asm__ __volatile__(
-                "rd %%psr, %0\n\t"
-                "nop\n\t"
-                "nop\n\t"
-                "nop\n" : "=r" (regs.psr));
-        unhandled_fault(address, current, &regs);
-
-        /* Not reached */
-        return 0;
-}
-
 static inline void
 show_signal_msg(struct pt_regs *regs, int sig, int code,
                 unsigned long address, struct task_struct *tsk)
@@ -162,8 +115,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
         struct vm_area_struct *vma;
         struct task_struct *tsk = current;
         struct mm_struct *mm = tsk->mm;
-        unsigned int fixup;
-        unsigned long g2;
         int from_user = !(regs->psr & PSR_PS);
         int code;
         vm_fault_t fault;
@@ -281,31 +232,20 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
         /* Is this in ex_table? */
 no_context:
-        g2 = regs->u_regs[UREG_G2];
         if (!from_user) {
-                fixup = search_extables_range(regs->pc, &g2);
-                /* Values below 10 are reserved for other things */
-                if (fixup > 10) {
-                        extern const unsigned int __memset_start[];
-                        extern const unsigned int __memset_end[];
+                const struct exception_table_entry *entry;

+                entry = search_exception_tables(regs->pc);
+                if (entry) {
 #ifdef DEBUG_EXCEPTIONS
                         printk("Exception: PC<%08lx> faddr<%08lx>\n",
                                regs->pc, address);
-                        printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
-                                regs->pc, fixup, g2);
+                        printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
                                regs->pc, entry->fixup);
 #endif
-                        if ((regs->pc >= (unsigned long)__memset_start &&
-                             regs->pc < (unsigned long)__memset_end)) {
-                                regs->u_regs[UREG_I4] = address;
-                                regs->u_regs[UREG_I5] = regs->pc;
-                        }
-                        regs->u_regs[UREG_G2] = g2;
-                        regs->pc = fixup;
+                        regs->pc = entry->fixup;
                         regs->npc = regs->pc + 4;
                         return;
                 }
         }

         unhandled_fault(address, tsk, regs);
         do_exit(SIGKILL);
arch/sparc/mm/mm_32.h

 /* SPDX-License-Identifier: GPL-2.0 */

 /* fault_32.c - visible as they are called from assembler */
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-                            unsigned long address);
 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                                unsigned long address);
lib/extable.c

@@ -21,7 +21,6 @@ static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
 }
 #endif

-#ifndef ARCH_HAS_SORT_EXTABLE
 #ifndef ARCH_HAS_RELATIVE_EXTABLE
 #define swap_ex NULL
 #else
@@ -88,9 +87,6 @@ void trim_init_extable(struct module *m)
                 m->num_exentries--;
 }
 #endif /* CONFIG_MODULES */
-#endif /* !ARCH_HAS_SORT_EXTABLE */
-
-#ifndef ARCH_HAS_SEARCH_EXTABLE
 static int cmp_ex_search(const void *key, const void *elt)
 {
@@ -120,4 +116,3 @@ search_extable(const struct exception_table_entry *base,
         return bsearch(&value, base, num,
                        sizeof(struct exception_table_entry),
                        cmp_ex_search);
 }
-#endif
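With ARCH_HAS_SORT_EXTABLE and ARCH_HAS_SEARCH_EXTABLE no longer defined, sparc32 now falls through to the generic path in this file: the table is kept sorted by instruction address and probed with the bsearch() call kept at the end of the hunk above. A self-contained user-space sketch of that lookup shape follows; it uses absolute addresses instead of the kernel's optionally relative entries, and search_one_table is a made-up name:

    /* User-space sketch of the generic sorted-table lookup sparc32 now
     * shares; mirrors the shape of cmp_ex_search()/search_extable() in
     * lib/extable.c, simplified to absolute addresses. */
    #include <stdlib.h>

    struct exception_table_entry { unsigned long insn, fixup; };

    static int cmp_ex_search(const void *key, const void *elt)
    {
            const struct exception_table_entry *e = elt;
            unsigned long addr = *(const unsigned long *)key;

            if (addr < e->insn)
                    return -1;
            if (addr > e->insn)
                    return 1;
            return 0;
    }

    static const struct exception_table_entry *
    search_one_table(const struct exception_table_entry *base, size_t num,
                     unsigned long addr)
    {
            /* base must already be sorted by insn (sort_extable). */
            return bsearch(&addr, base, num,
                           sizeof(struct exception_table_entry), cmp_ex_search);
    }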