nexedi/linux · Commit 5e3679c5, authored Oct 10, 2009 by Paul Mundt

    Merge branch 'sh/cachetlb'

Parents: a469f627 f9e2bdfd

Showing 4 changed files with 87 additions and 425 deletions:
arch/sh/include/asm/fixmap.h    +3  -3
arch/sh/mm/cache-sh4.c          +75 -421
arch/sh/mm/cache.c              +6  -0
arch/sh/mm/kmap.c               +3  -1
arch/sh/include/asm/fixmap.h

@@ -14,9 +14,9 @@
 #define _ASM_FIXMAP_H
 
 #include <linux/kernel.h>
+#include <linux/threads.h>
 #include <asm/page.h>
 #ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
 #include <asm/kmap_types.h>
 #endif
@@ -46,9 +46,9 @@
  * fix-mapped?
  */
 enum fixed_addresses {
-#define FIX_N_COLOURS 16
+#define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
-	FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS),
 	FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
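
The second hunk grows the colour-map fixmap window from one shared set of colour slots to one set per CPU, so kmap_coherent() on SMP can give each processor its own colour-indexed slot (threads.h moves to the top because NR_CPUS is now needed unconditionally). A rough sketch of the slot budget, assuming a hypothetical 4-CPU build; the colour counts mirror the hunk above, the CPU count is purely illustrative:

#include <stdio.h>

/* Values mirror the diff above; NR_CPUS = 4 is an assumption for the example. */
#define NR_CPUS		4
#define OLD_N_COLOURS	16
#define NEW_N_COLOURS	8

int main(void)
{
	printf("old FIX_CMAP window: %d slots, shared by all CPUs\n",
	       OLD_N_COLOURS);
	printf("new FIX_CMAP window: %d slots, %d private per CPU\n",
	       NEW_N_COLOURS * NR_CPUS, NEW_N_COLOURS);
	return 0;
}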
arch/sh/mm/cache-sh4.c

@@ -2,7 +2,7 @@
  * arch/sh/mm/cache-sh4.c
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2001 - 2007  Paul Mundt
+ * Copyright (C) 2001 - 2009  Paul Mundt
  * Copyright (C) 2003  Richard Curnow
  * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
  *
@@ -15,6 +15,8 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/fs.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
@@ -23,20 +25,11 @@
  * flushing. Anything exceeding this will simply flush the dcache in its
  * entirety.
  */
-#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
 
-/*
- * This is initialised here to ensure that it is not placed in the BSS.  If
- * that were to happen, note that cache_init gets called before the BSS is
- * cleared, so this would get nulled out which would be hopeless.
- */
-static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
-	(void (*)(unsigned long, unsigned long))0xdeadbeef;
-
 /*
  * Write back the range of D-cache, and purge the I-cache.
  *
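
The function pointer deleted in the hunk above relied on a subtle linkage trick worth spelling out: giving a static pointer an explicit non-zero initialiser keeps it in .data rather than .bss, so it survives the early-boot BSS clear that runs after cache_init() has already stored the real pointer. A standalone sketch of the idiom; names and the sentinel value are illustrative only:

/*
 * An explicitly initialised static lands in .data; a zero or missing
 * initialiser would put it in .bss, where the early-boot BSS clear
 * (running after cache_init() has assigned the real pointer) would
 * null it out again.
 */
typedef void (*flush_fn)(unsigned long start, unsigned long extent);

static flush_fn flush_impl = (flush_fn)0xdeadbeef;	/* sentinel, never called */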
@@ -123,12 +116,12 @@ static void sh4_flush_dcache_page(void *arg)
 	else
 #endif
 	{
-		unsigned long phys = PHYSADDR(page_address(page));
+		unsigned long phys = page_to_phys(page);
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
 
 		/* Loop all the D-cache */
-		n = boot_cpu_data.dcache.n_aliases;
+		n = boot_cpu_data.dcache.way_incr >> 12;
 		for (i = 0; i < n; i++, addr += 4096)
 			flush_cache_4096(addr, phys);
 	}
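
Deriving the loop count from way_incr rather than n_aliases makes the writeback walk every 4 KiB slice of a cache way, which stays correct even when the page size exceeds the 4 KiB colour granularity. A quick sanity check on the arithmetic, assuming a typical SH-4 geometry of 16 KiB per way (way_incr = 0x4000; the numbers are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long way_incr = 0x4000;	/* assumed: 16 KiB per way */
	unsigned long n = way_incr >> 12;	/* 4 KiB slices per way */

	printf("%lu iterations x 4 KiB = %lu KiB covered per way\n", n, n * 4);
	return 0;
}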
@@ -158,10 +151,27 @@ static void __uses_jump_to_uncached flush_icache_all(void)
 	local_irq_restore(flags);
 }
 
-static inline void flush_dcache_all(void)
+static void flush_dcache_all(void)
 {
-	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
-	wmb();
+	unsigned long addr, end_addr, entry_offset;
+
+	end_addr = CACHE_OC_ADDRESS_ARRAY +
+		(current_cpu_data.dcache.sets <<
+		 current_cpu_data.dcache.entry_shift) *
+			current_cpu_data.dcache.ways;
+
+	entry_offset = 1 << current_cpu_data.dcache.entry_shift;
+
+	for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+		__raw_writel(0, addr); addr += entry_offset;
+	}
 }
 
 static void sh4_flush_cache_all(void *unused)
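
The new flush_dcache_all() writes zero straight into the memory-mapped operand-cache address array, clearing the valid and dirty bits of every entry, eight entries per loop pass to amortize loop overhead. Checking the bounds arithmetic with an assumed geometry (512 sets, 32-byte lines so entry_shift = 5, one way; the base address is the usual SH-4 operand-cache array base, but treat all numbers as illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long oc_base     = 0xf4000000UL;	/* assumed OC array base */
	unsigned long sets        = 512;
	unsigned long entry_shift = 5;			/* 32-byte cache lines */
	unsigned long ways        = 1;

	unsigned long end_addr = oc_base + (sets << entry_shift) * ways;
	unsigned long entries  = (end_addr - oc_base) >> entry_shift;

	/* 512 entries stepped 8 at a time -> 64 passes of the unrolled loop. */
	printf("%lu entries, %lu loop passes\n", entries, entries / 8);
	return 0;
}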
@@ -170,89 +180,13 @@ static void sh4_flush_cache_all(void *unused)
 	flush_icache_all();
 }
 
-static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
-			     unsigned long end)
-{
-	unsigned long d = 0, p = start & PAGE_MASK;
-	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
-	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
-	unsigned long select_bit;
-	unsigned long all_aliases_mask;
-	unsigned long addr_offset;
-	pgd_t *dir;
-	pmd_t *pmd;
-	pud_t *pud;
-	pte_t *pte;
-	int i;
-
-	dir = pgd_offset(mm, p);
-	pud = pud_offset(dir, p);
-	pmd = pmd_offset(pud, p);
-	end = PAGE_ALIGN(end);
-
-	all_aliases_mask = (1 << n_aliases) - 1;
-
-	do {
-		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
-			p &= PMD_MASK;
-			p += PMD_SIZE;
-			pmd++;
-
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, p);
-
-		do {
-			unsigned long phys;
-			pte_t entry = *pte;
-
-			if (!(pte_val(entry) & _PAGE_PRESENT)) {
-				pte++;
-				p += PAGE_SIZE;
-				continue;
-			}
-
-			phys = pte_val(entry) & PTE_PHYS_MASK;
-
-			if ((p ^ phys) & alias_mask) {
-				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
-				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);
-
-				if (d == all_aliases_mask)
-					goto loop_exit;
-			}
-
-			pte++;
-			p += PAGE_SIZE;
-		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
-		pmd++;
-	} while (p < end);
-
-loop_exit:
-	addr_offset = 0;
-	select_bit = 1;
-
-	for (i = 0; i < n_aliases; i++) {
-		if (d & select_bit) {
-			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
-			wmb();
-		}
-
-		select_bit <<= 1;
-		addr_offset += PAGE_SIZE;
-	}
-}
-
 /*
  * Note : (RPC) since the caches are physically tagged, the only point
  * of flush_cache_mm for SH-4 is to get rid of aliases from the
  * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
  * lines can stay resident so long as the virtual address they were
  * accessed with (hence cache set) is in accord with the physical
- * address (i.e. tag).  It's no different here.  So I reckon we don't
- * need to flush the I-cache, since aliases don't matter for that.  We
- * should try that.
+ * address (i.e. tag).  It's no different here.
  *
  * Caller takes mm->mmap_sem.
  */
@@ -263,33 +197,7 @@ static void sh4_flush_cache_mm(void *arg)
 	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
 		return;
 
-	/*
-	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
-	 * the cache is physically tagged, the data can just be left in there.
-	 */
-	if (boot_cpu_data.dcache.n_aliases == 0)
-		return;
-
-	/*
-	 * Don't bother groveling around the dcache for the VMA ranges
-	 * if there are too many PTEs to make it worthwhile.
-	 */
-	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
-		flush_dcache_all();
-	else {
-		struct vm_area_struct *vma;
-
-		/*
-		 * In this case there are reasonably sized ranges to flush,
-		 * iterate through the VMA list and take care of any aliases.
-		 */
-		for (vma = mm->mmap; vma; vma = vma->vm_next)
-			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
-	}
-
-	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
-	if (mm->exec_vm)
-		flush_icache_all();
+	flush_dcache_all();
 }
 
 /*
@@ -302,44 +210,64 @@ static void sh4_flush_cache_page(void *args)
 {
 	struct flusher_data *data = args;
 	struct vm_area_struct *vma;
+	struct page *page;
 	unsigned long address, pfn, phys;
-	unsigned int alias_mask;
+	int map_coherent = 0;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	void *vaddr;
 
 	vma = data->vma;
 	address = data->addr1;
 	pfn = data->addr2;
 	phys = pfn << PAGE_SHIFT;
+	page = pfn_to_page(pfn);
 
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
 
-	alias_mask = boot_cpu_data.dcache.alias_mask;
-
-	/* We only need to flush D-cache when we have alias */
-	if ((address ^ phys) & alias_mask) {
-		/* Loop 4K of the D-cache */
-		flush_cache_4096(
-			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
-			phys);
-		/* Loop another 4K of the D-cache */
-		flush_cache_4096(
-			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
-			phys);
-	}
-
-	alias_mask = boot_cpu_data.icache.alias_mask;
-	if (vma->vm_flags & VM_EXEC) {
-		/*
-		 * Evict entries from the portion of the cache from which code
-		 * may have been executed at this address (virtual).  There's
-		 * no need to evict from the portion corresponding to the
-		 * physical address as for the D-cache, because we know the
-		 * kernel has never executed the code through its identity
-		 * translation.
-		 */
-		flush_cache_4096(
-			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
-			phys);
-	}
+	address &= PAGE_MASK;
+	pgd = pgd_offset(vma->vm_mm, address);
+	pud = pud_offset(pgd, address);
+	pmd = pmd_offset(pud, address);
+	pte = pte_offset_kernel(pmd, address);
+
+	/* If the page isn't present, there is nothing to do here. */
+	if (!(pte_val(*pte) & _PAGE_PRESENT))
+		return;
+
+	if ((vma->vm_mm == current->active_mm))
+		vaddr = NULL;
+	else {
+		/*
+		 * Use kmap_coherent or kmap_atomic to do flushes for
+		 * another ASID than the current one.
+		 */
+		map_coherent = (current_cpu_data.dcache.n_aliases &&
+			!test_bit(PG_dcache_dirty, &page->flags) &&
+			page_mapped(page));
+		if (map_coherent)
+			vaddr = kmap_coherent(page, address);
+		else
+			vaddr = kmap_atomic(page, KM_USER0);
+
+		address = (unsigned long)vaddr;
+	}
+
+	if (pages_do_alias(address, phys))
+		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY |
+			(address & shm_align_mask), phys);
+
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_all();
+
+	if (vaddr) {
+		if (map_coherent)
+			kunmap_coherent(vaddr);
+		else
+			kunmap_atomic(vaddr, KM_USER0);
+	}
 }
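
In the rewritten sh4_flush_cache_page(), the D-cache flush fires only when pages_do_alias() reports that the virtual mapping and the physical address fall in different cache colours; on SH this amounts, as far as I can tell, to XORing the two addresses against the alias mask. A worked check with assumed values (a 4-colour cache, so the mask covers bits 12-13):

#include <stdio.h>

int main(void)
{
	unsigned long shm_align_mask = 0x3000UL;	/* assumed: 4 colours */
	unsigned long vaddr = 0x20003000UL;		/* colour 3 */
	unsigned long phys  = 0x0c001000UL;		/* colour 1 */

	/* Mirrors the spirit of pages_do_alias(): a non-zero result means
	 * the two addresses index different cache sets for the same data. */
	unsigned long alias = (vaddr ^ phys) & shm_align_mask;

	printf("%s\n", alias ? "colours differ: flush needed"
			     : "same colour: no flush");
	return 0;
}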
@@ -372,24 +300,10 @@ static void sh4_flush_cache_range(void *args)
 	if (boot_cpu_data.dcache.n_aliases == 0)
 		return;
 
-	/*
-	 * Don't bother with the lookup and alias check if we have a
-	 * wide range to cover, just blow away the dcache in its
-	 * entirety instead. -- PFM.
-	 */
-	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
-		flush_dcache_all();
-	else
-		__flush_cache_mm(vma->vm_mm, start, end);
+	flush_dcache_all();
 
-	if (vma->vm_flags & VM_EXEC) {
-		/*
-		 * TODO: Is this required???  Need to look at how I-cache
-		 * coherency is assured when new programs are loaded to see if
-		 * this matters.
-		 */
+	if (vma->vm_flags & VM_EXEC)
 		flush_icache_all();
-	}
 }
 
 /**
@@ -443,7 +357,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
	 * pointless nead-of-loop check for 0 iterations.
	 */
 	do {
-		ea = base_addr + PAGE_SIZE;
+		ea = base_addr + 4096;
 		a = base_addr;
 		p = phys;
@@ -463,245 +377,6 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 	} while (--way_count != 0);
 }
 
-/*
- * Break the 1, 2 and 4 way variants of this out into separate functions to
- * avoid nearly all the overhead of having the conditional stuff in the function
- * bodies (+ the 1 and 2 way cases avoid saving any registers too).
- *
- * We want to eliminate unnecessary bus transactions, so this code uses
- * a non-obvious technique.
- *
- * Loop over a cache way sized block of, one cache line at a time. For each
- * line, use movca.a to cause the current cache line contents to be written
- * back, but without reading anything from main memory. However this has the
- * side effect that the cache is now caching that memory location. So follow
- * this with a cache invalidate to mark the cache line invalid. And do all
- * this with interrupts disabled, to avoid the cache line being accidently
- * evicted while it is holding garbage.
- *
- * This also breaks in a number of circumstances:
- * - if there are modifications to the region of memory just above
- *   empty_zero_page (for example because a breakpoint has been placed
- *   there), then these can be lost.
- *
- *   This is because the the memory address which the cache temporarily
- *   caches in the above description is empty_zero_page. So the
- *   movca.l hits the cache (it is assumed that it misses, or at least
- *   isn't dirty), modifies the line and then invalidates it, losing the
- *   required change.
- *
- * - If caches are disabled or configured in write-through mode, then
- *   the movca.l writes garbage directly into memory.
- */
-static void __flush_dcache_segment_writethrough(unsigned long start,
-						unsigned long extent_per_way)
-{
-	unsigned long addr;
-	int i;
-
-	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);
-
-	while (extent_per_way) {
-		for (i = 0; i < cpu_data->dcache.ways; i++)
-			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);
-
-		addr += cpu_data->dcache.linesz;
-		extent_per_way -= cpu_data->dcache.linesz;
-	}
-}
-
-static void __flush_dcache_segment_1way(unsigned long start,
-					unsigned long extent_per_way)
-{
-	unsigned long orig_sr, sr_with_bl;
-	unsigned long base_addr;
-	unsigned long way_incr, linesz, way_size;
-	struct cache_info *dcache;
-	register unsigned long a0, a0e;
-
-	asm volatile("stc sr, %0" : "=r" (orig_sr));
-	sr_with_bl = orig_sr | (1<<28);
-	base_addr = ((unsigned long)&empty_zero_page[0]);
-
-	/*
-	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
-	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
-	 * aligned to any better than the cache line size (which it will be
-	 * anyway by construction), let's align it to at least the way_size of
-	 * any existing or conceivable SH-4 D-cache.  -- RPC
-	 */
-	base_addr = ((base_addr >> 16) << 16);
-	base_addr |= start;
-
-	dcache = &boot_cpu_data.dcache;
-	linesz = dcache->linesz;
-	way_incr = dcache->way_incr;
-	way_size = dcache->way_size;
-
-	a0 = base_addr;
-	a0e = base_addr + extent_per_way;
-	do {
-		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-		asm volatile("movca.l r0, @%0\n\t"
-			     "ocbi @%0" : : "r" (a0));
-		a0 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "ocbi @%0" : : "r" (a0));
-		a0 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "ocbi @%0" : : "r" (a0));
-		a0 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "ocbi @%0" : : "r" (a0));
-		asm volatile("ldc %0, sr" : : "r" (orig_sr));
-		a0 += linesz;
-	} while (a0 < a0e);
-}
-
-static void __flush_dcache_segment_2way(unsigned long start,
-					unsigned long extent_per_way)
-{
-	unsigned long orig_sr, sr_with_bl;
-	unsigned long base_addr;
-	unsigned long way_incr, linesz, way_size;
-	struct cache_info *dcache;
-	register unsigned long a0, a1, a0e;
-
-	asm volatile("stc sr, %0" : "=r" (orig_sr));
-	sr_with_bl = orig_sr | (1<<28);
-	base_addr = ((unsigned long)&empty_zero_page[0]);
-
-	/* See comment under 1-way above */
-	base_addr = ((base_addr >> 16) << 16);
-	base_addr |= start;
-
-	dcache = &boot_cpu_data.dcache;
-	linesz = dcache->linesz;
-	way_incr = dcache->way_incr;
-	way_size = dcache->way_size;
-
-	a0 = base_addr;
-	a1 = a0 + way_incr;
-	a0e = base_addr + extent_per_way;
-	do {
-		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1" : :
-			     "r" (a0), "r" (a1));
-		a0 += linesz;
-		a1 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1" : :
-			     "r" (a0), "r" (a1));
-		a0 += linesz;
-		a1 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1" : :
-			     "r" (a0), "r" (a1));
-		a0 += linesz;
-		a1 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1" : :
-			     "r" (a0), "r" (a1));
-		asm volatile("ldc %0, sr" : : "r" (orig_sr));
-		a0 += linesz;
-		a1 += linesz;
-	} while (a0 < a0e);
-}
-
-static void __flush_dcache_segment_4way(unsigned long start,
-					unsigned long extent_per_way)
-{
-	unsigned long orig_sr, sr_with_bl;
-	unsigned long base_addr;
-	unsigned long way_incr, linesz, way_size;
-	struct cache_info *dcache;
-	register unsigned long a0, a1, a2, a3, a0e;
-
-	asm volatile("stc sr, %0" : "=r" (orig_sr));
-	sr_with_bl = orig_sr | (1<<28);
-	base_addr = ((unsigned long)&empty_zero_page[0]);
-
-	/* See comment under 1-way above */
-	base_addr = ((base_addr >> 16) << 16);
-	base_addr |= start;
-
-	dcache = &boot_cpu_data.dcache;
-	linesz = dcache->linesz;
-	way_incr = dcache->way_incr;
-	way_size = dcache->way_size;
-
-	a0 = base_addr;
-	a1 = a0 + way_incr;
-	a2 = a1 + way_incr;
-	a3 = a2 + way_incr;
-	a0e = base_addr + extent_per_way;
-	do {
-		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "movca.l r0, @%2\n\t"
-			     "movca.l r0, @%3\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1\n\t"
-			     "ocbi @%2\n\t"
-			     "ocbi @%3\n\t" : :
-			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-		a0 += linesz;
-		a1 += linesz;
-		a2 += linesz;
-		a3 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "movca.l r0, @%2\n\t"
-			     "movca.l r0, @%3\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1\n\t"
-			     "ocbi @%2\n\t"
-			     "ocbi @%3\n\t" : :
-			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-		a0 += linesz;
-		a1 += linesz;
-		a2 += linesz;
-		a3 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "movca.l r0, @%2\n\t"
-			     "movca.l r0, @%3\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1\n\t"
-			     "ocbi @%2\n\t"
-			     "ocbi @%3\n\t" : :
-			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-		a0 += linesz;
-		a1 += linesz;
-		a2 += linesz;
-		a3 += linesz;
-		asm volatile("movca.l r0, @%0\n\t"
-			     "movca.l r0, @%1\n\t"
-			     "movca.l r0, @%2\n\t"
-			     "movca.l r0, @%3\n\t"
-			     "ocbi @%0\n\t"
-			     "ocbi @%1\n\t"
-			     "ocbi @%2\n\t"
-			     "ocbi @%3\n\t" : :
-			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-		asm volatile("ldc %0, sr" : : "r" (orig_sr));
-		a0 += linesz;
-		a1 += linesz;
-		a2 += linesz;
-		a3 += linesz;
-	} while (a0 < a0e);
-}
-
 extern void __weak sh4__flush_region_init(void);
 
 /*
@@ -709,32 +384,11 @@ extern void __weak sh4__flush_region_init(void);
  */
 void __init sh4_cache_init(void)
 {
-	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
-
 	printk("PVR=%08x CVR=%08x PRR=%08x\n",
 		ctrl_inl(CCN_PVR),
 		ctrl_inl(CCN_CVR),
 		ctrl_inl(CCN_PRR));
 
-	if (wt_enabled)
-		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
-	else {
-		switch (boot_cpu_data.dcache.ways) {
-		case 1:
-			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-			break;
-		case 2:
-			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-			break;
-		case 4:
-			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-			break;
-		default:
-			panic("unknown number of cache ways\n");
-			break;
-		}
-	}
-
 	local_flush_icache_range	= sh4_flush_icache_range;
 	local_flush_dcache_page		= sh4_flush_dcache_page;
 	local_flush_cache_all		= sh4_flush_cache_all;
arch/sh/mm/cache.c

@@ -164,11 +164,17 @@ void flush_cache_all(void)
 
 void flush_cache_mm(struct mm_struct *mm)
 {
+	if (boot_cpu_data.dcache.n_aliases == 0)
+		return;
+
 	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
 }
 
 void flush_cache_dup_mm(struct mm_struct *mm)
 {
+	if (boot_cpu_data.dcache.n_aliases == 0)
+		return;
+
 	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
 }
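
Hoisting the n_aliases check into these generic wrappers lets every caller skip the cross-CPU cacheop broadcast when the D-cache cannot alias at all, that is, when a cache way is no larger than a page. A sketch of how the alias count falls out of the geometry; this mirrors my reading of the kernel's compute_alias() helper, so treat it as an approximation:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, page_shift = 12;
	unsigned long way_size  = 16384;	/* assumed: 16 KiB per way */

	unsigned long alias_mask = (way_size - 1) & ~(page_size - 1);
	unsigned long n_aliases  = alias_mask ? (alias_mask >> page_shift) + 1 : 0;

	/* 16 KiB ways over 4 KiB pages -> 4 colours; a 4 KiB way gives 0,
	 * which is exactly the early-return case added above. */
	printf("n_aliases = %lu\n", n_aliases);
	return 0;
}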
arch/sh/mm/kmap.c

@@ -39,7 +39,9 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 	pagefault_disable();
 
-	idx = FIX_CMAP_END -
-		((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT);
+	idx = FIX_CMAP_END -
+		(((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
+		 (FIX_N_COLOURS * smp_processor_id()));
+
 	vaddr = __fix_to_virt(idx);
 
 	BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
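
With the new indexing, each CPU owns a private block of FIX_N_COLOURS fixmap slots, and the slot within the block is selected by the page colour of the user address. A worked example with FIX_N_COLOURS = 8, per the fixmap.h hunk, and hypothetical inputs:

#include <stdio.h>

#define FIX_N_COLOURS	8
#define PAGE_SHIFT	12

int main(void)
{
	unsigned long addr = 0x20003000UL;	/* hypothetical user address */
	unsigned long cpu = 1;			/* hypothetical CPU id */

	/* Offset subtracted from FIX_CMAP_END, as in the new kmap_coherent():
	 * colour 3 inside CPU 1's block of 8 slots -> 3 + 8 = 11. */
	unsigned long off = ((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
			    (FIX_N_COLOURS * cpu);

	printf("idx = FIX_CMAP_END - %lu\n", off);
	return 0;
}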