Commit 7de7e33b authored Jun 08, 2002 by Anton Blanchard
ppc64: POWER4 lazy icache flushing
parent 35013e47
Showing 5 changed files with 60 additions and 22 deletions (+60 -22)
arch/ppc64/kernel/head.S          +6  -5
arch/ppc64/kernel/htab.c          +21 -3
arch/ppc64/kernel/pSeries_htab.c  +1  -3
arch/ppc64/kernel/pSeries_lpar.c  +1  -3
arch/ppc64/mm/init.c              +31 -8
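
The heart of the patch is the block added to __hash_page() in arch/ppc64/kernel/htab.c below. On POWER4 the kernel stops flushing the icache eagerly when user pages are cleared or copied; a page whose PG_arch_1 flag is clear is instead treated as "icache dirty". A data-side hash fault (trap 0x300, the Data Storage interrupt vector) just maps the page with the no-execute PP bit, and only an instruction-side fault (trap 0x400, the Instruction Storage interrupt vector) pays for a flush and sets PG_arch_1. The standalone C sketch below models that decision only; fake_page, hash_fault and flush_dcache_icache are stand-ins, not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

#define TRAP_DSI	0x300		/* data storage interrupt vector */
#define TRAP_ISI	0x400		/* instruction storage interrupt vector */
#define PPC64_HWNOEXEC	(1 << 2)	/* no-execute PP bit, as defined in the patch */

/* Stand-in for a struct page plus its PG_arch_1 ("icache clean") flag. */
struct fake_page {
	bool icache_clean;
};

/* Stand-in for __flush_dcache_icache(page_address(page)). */
static void flush_dcache_icache(struct fake_page *page)
{
	printf("flushing icache for page %p\n", (void *)page);
	page->icache_clean = true;
}

/*
 * Model of the POWER4 lazy-flush decision added to __hash_page():
 * only an instruction fault on a not-yet-clean page pays for a flush;
 * a data fault maps the page no-exec and defers the work to a later
 * instruction fault.
 */
static unsigned long hash_fault(struct fake_page *page, unsigned long trap,
				unsigned long newpp)
{
	if (!page->icache_clean) {
		if (trap == TRAP_ISI) {
			flush_dcache_icache(page);	/* flush now, mark clean */
		} else {
			newpp |= PPC64_HWNOEXEC;	/* defer: map no-exec */
		}
	}
	return newpp;
}

int main(void)
{
	struct fake_page page = { .icache_clean = false };
	unsigned long pp;

	/* Data fault first: no flush, the page is mapped no-exec. */
	pp = hash_fault(&page, TRAP_DSI, 0);
	printf("after DSI: pp=%#lx clean=%d\n", pp, page.icache_clean);

	/* Instruction fault: flush once, mark clean, map executable. */
	pp = hash_fault(&page, TRAP_ISI, 0);
	printf("after ISI: pp=%#lx clean=%d\n", pp, page.icache_clean);
	return 0;
}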

arch/ppc64/kernel/head.S
@@ -575,7 +575,8 @@ stab_bolted_user_return:
 	bl	.do_stab_SI
 	b	1f
-2:	bl	.do_hash_page_DSI	/* Try to handle as hpte fault */
+2:	li	r5,0x300
+	bl	.do_hash_page_DSI	/* Try to handle as hpte fault */
 1:	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
@@ -627,9 +628,8 @@ InstructionAccess_common:
 	bl	.do_stab_SI
 	b	1f
-2:	andis.	r0,r23,0x4000		/* no pte found? */
-	beq	1f			/* if so, try to put a PTE */
-	mr	r3,r22			/* into the hash table */
+2:	mr	r3,r22
+	li	r5,0x400
 	bl	.do_hash_page_ISI	/* Try to handle as hpte fault */
 1:	mr	r4,r22
@@ -804,6 +804,7 @@ _GLOBAL(do_hash_page_DSI)
 /*
  * r3 contains the faulting address
  * r4 contains the required access permissions
+ * r5 contains the trap number
  *
  * at return r3 = 0 for success
  */
@@ -1119,7 +1120,7 @@ _GLOBAL(save_remaining_regs)
 	rldimi	r22,r20,15,48		/* Insert desired EE value */
 #endif
-	mtmsrd	r22
+	mtmsrd	r22,1
 	blr

arch/ppc64/kernel/htab.c
@@ -195,7 +195,7 @@ static inline unsigned long computeHptePP(unsigned long pte)
  * to be valid via Linux page tables, return 1. If handled return 0
  */
 int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
-		pte_t *ptep)
+		pte_t *ptep, unsigned long trap)
 {
 	unsigned long va, vpn;
 	unsigned long newpp, prpn;
@@ -244,6 +244,24 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 	newpp = computeHptePP(pte_val(new_pte));
 
+#define PPC64_HWNOEXEC (1 << 2)
+
+	/* We do lazy icache flushing on POWER4 */
+	if (__is_processor(PV_POWER4) && pfn_valid(pte_pfn(new_pte))) {
+		struct page *page = pte_page(new_pte);
+
+		/* page is dirty */
+		if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
+			if (trap == 0x400) {
+				__flush_dcache_icache(page_address(page));
+				set_bit(PG_arch_1, &page->flags);
+			} else {
+				newpp |= PPC64_HWNOEXEC;
+			}
+		}
+	}
+
 	/* Check if pte already has an hpte (case 2) */
 	if (pte_val(old_pte) & _PAGE_HASHPTE) {
 		/* There MIGHT be an HPTE for this pte */
@@ -317,7 +335,7 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 	return 0;
 }
 
-int hash_page(unsigned long ea, unsigned long access)
+int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
 	void *pgdir;
 	unsigned long vsid;
@@ -376,7 +394,7 @@ int hash_page(unsigned long ea, unsigned long access)
 	 */
 	spin_lock(&mm->page_table_lock);
 	ptep = find_linux_pte(pgdir, ea);
-	ret = __hash_page(ea, access, vsid, ptep);
+	ret = __hash_page(ea, access, vsid, ptep, trap);
 	spin_unlock(&mm->page_table_lock);
 
 	return ret;

arch/ppc64/kernel/pSeries_htab.c
@@ -215,7 +215,7 @@ static inline void set_pp_bit(unsigned long pp, HPTE *addr)
 	__asm__ __volatile__(
 	"1:	ldarx	%0,0,%3\n\
-		rldimi	%0,%2,0,62\n\
+		rldimi	%0,%2,0,61\n\
 		stdcx.	%0,0,%3\n\
 		bne	1b"
 	: "=&r" (old), "=m" (*p)
@@ -266,8 +266,6 @@ static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	unsigned long vpn, avpn;
 	unsigned long flags;
 
-	udbg_printf("updatepp\n");
-
 	if (large)
 		vpn = va >> LARGE_PAGE_SHIFT;
 	else

arch/ppc64/kernel/pSeries_lpar.c
@@ -647,11 +647,9 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
 {
 	unsigned long lpar_rc;
 	unsigned long flags;
-	flags = (newpp & 3) | H_AVPN;
+	flags = (newpp & 7) | H_AVPN;
 	unsigned long vpn = va >> PAGE_SHIFT;
 
-	udbg_printf("updatepp\n");
-
 	lpar_rc = plpar_pte_protect(flags, slot, (vpn >> 4) & ~0x7fUL);
 
 	if (lpar_rc == H_Not_Found) {
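
The two pSeries hunks above widen the protection field that reaches the hardware page table from two bits to three, so the no-execute bit (PPC64_HWNOEXEC, value 4) introduced in htab.c is not masked off on its way out: the native path's rldimi now inserts bits 61-63 of the new pp value instead of bits 62-63, and the LPAR path hands newpp & 7 rather than newpp & 3 to the hypervisor. A small sketch of what the rldimi change means, using IBM bit numbering (bit 0 is the MSB) and modelling only the SH=0 form used here:

#include <stdint.h>
#include <stdio.h>

/*
 * Model of "rldimi dest,src,0,MB": rotate src left by 0 and insert it
 * into dest under a mask covering bits MB..63, i.e. the low (64 - MB)
 * bits of the doubleword.
 */
static uint64_t rldimi_sh0(uint64_t dest, uint64_t src, unsigned mb)
{
	uint64_t mask = ~0ULL >> mb;	/* low (64 - mb) bits set */
	return (dest & ~mask) | (src & mask);
}

int main(void)
{
	uint64_t hpte_lo = 0xdeadbeef00000000ULL | 0x3;	/* existing pp bits = 0b011 */
	uint64_t newpp = 0x4;				/* PPC64_HWNOEXEC, pp = 0 */

	/* Old instruction (MB=62): only the two low bits are replaced,
	 * so the incoming no-execute bit is masked off. */
	printf("rldimi ...,0,62 -> %#llx\n",
	       (unsigned long long)rldimi_sh0(hpte_lo, newpp, 62));

	/* New instruction (MB=61): the three low bits are replaced, so
	 * PPC64_HWNOEXEC survives into the HPTE. */
	printf("rldimi ...,0,61 -> %#llx\n",
	       (unsigned long long)rldimi_sh0(hpte_lo, newpp, 61));
	return 0;
}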

arch/ppc64/mm/init.c
@@ -113,13 +113,10 @@ void show_mem(void)
 			reserved++;
 		else if (PageSwapCache(mem_map+i))
 			cached++;
-		else if (!atomic_read(&mem_map[i].count))
-			free++;
-		else
-			shared += atomic_read(&mem_map[i].count) - 1;
+		else if (page_count(mem_map+i))
+			shared += page_count(mem_map+i) - 1;
 	}
 	printk("%d pages of RAM\n", total);
 	printk("%d free pages\n", free);
 	printk("%d reserved pages\n", reserved);
 	printk("%d pages shared\n", shared);
 	printk("%d pages swap cached\n", cached);
@@ -575,6 +572,12 @@ void flush_dcache_page(struct page *page)
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
+	if (__is_processor(PV_POWER4))
+		return;
+
 	if ((vma->vm_flags & VM_EXEC) == 0)
 		return;
+
 	if (page->mapping && !PageReserved(page) &&
 	    !test_bit(PG_arch_1, &page->flags)) {
 		__flush_dcache_icache(page_address(page));
@@ -585,13 +588,32 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
 	clear_page(page);
+
 	/* XXX we shouldnt have to do this, but glibc requires it */
+	if (__is_processor(PV_POWER4))
+		clear_bit(PG_arch_1, &pg->flags);
+	else
 		__flush_dcache_icache(page);
 }
 
 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
 {
 	copy_page(vto, vfrom);
-	__flush_dcache_icache(vto);
+
+	/*
+	 * Unfortunately we havent always marked our GOT and PLT sections
+	 * as executable, so we need to flush all file regions - Anton
+	 */
+#if 0
+	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
+		return;
+#endif
+
+	if (__is_processor(PV_POWER4))
+		clear_bit(PG_arch_1, &pg->flags);
+	else
+		__flush_dcache_icache(vto);
 }
 
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
@@ -605,7 +627,7 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 extern pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea);
 int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
-		pte_t *ptep);
+		pte_t *ptep, unsigned long trap);
 
 /*
  * This is called at the end of handling a user page fault, when the
@@ -633,5 +655,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	ptep = find_linux_pte(pgdir, ea);
 	vsid = get_vsid(vma->vm_mm->context, ea);
-	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep);
+	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep, 0x300);
 }
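
Taken together, the hunks thread the trap number of the faulting exception from the low-level entry code down to where the HPTE is built: head.S loads r5 with 0x300 on the data-storage path and 0x400 on the instruction-storage path, hash_page() forwards it to __hash_page(), and update_mmu_cache() passes 0x300 because it runs after a data-side fault. A compressed sketch of that calling shape, with the bodies stubbed out (only the signatures mirror the patch; the vsid/ptep lookup is elided):

#include <stdio.h>

/* Stub with the new five-argument signature from the patch. */
static int __hash_page(unsigned long ea, unsigned long access,
		       unsigned long vsid, void *ptep, unsigned long trap)
{
	printf("__hash_page(ea=%#lx, trap=%#lx)\n", ea, trap);
	return 0;
}

/* Stub with the new three-argument signature from the patch. */
static int hash_page(unsigned long ea, unsigned long access,
		     unsigned long trap)
{
	return __hash_page(ea, access, 0, NULL, trap);
}

int main(void)
{
	hash_page(0x10000000UL, 0, 0x300);	/* DSI path: r5 = 0x300 in head.S */
	hash_page(0x10000000UL, 0, 0x400);	/* ISI path: r5 = 0x400 in head.S */
	/* update_mmu_cache() calls __hash_page(..., 0x300) directly. */
	return 0;
}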