Commit 2b7df078
Authored Mar 06, 2005 by Linus Torvalds

    Merge bk://kernel.bkbits.net/davem/flush_cache_page-2.6
    into ppc970.osdl.org:/home/torvalds/v2.6/linux

Parents: 61f80950 28ff874f

Showing 41 changed files with 104 additions and 164 deletions (+104 -164)
Documentation/cachetlb.txt              +9   -3
arch/arm/mm/fault-armv.c                +2   -2
arch/arm/mm/flush.c                     +1   -1
arch/mips/mm/c-r3k.c                    +1   -2
arch/mips/mm/c-r4k.c                    +1   -2
arch/mips/mm/c-sb1.c                    +6   -5
arch/mips/mm/c-tx39.c                   +1   -2
arch/mips/mm/cache.c                    +1   -1
arch/sh/mm/cache-sh4.c                  +10  -30
arch/sh/mm/cache-sh7705.c               +2   -18
arch/sh64/mm/cache.c                    +2   -27
arch/sparc/mm/srmmu.c                   +1   -2
fs/binfmt_elf.c                         +1   -1
include/asm-alpha/cacheflush.h          +1   -1
include/asm-arm/cacheflush.h            +8   -8
include/asm-arm26/cacheflush.h          +1   -1
include/asm-cris/cacheflush.h           +1   -1
include/asm-frv/cacheflush.h            +1   -1
include/asm-h8300/cacheflush.h          +1   -1
include/asm-i386/cacheflush.h           +1   -1
include/asm-ia64/cacheflush.h           +1   -1
include/asm-m32r/cacheflush.h           +3   -3
include/asm-m68k/cacheflush.h           +7   -8
include/asm-m68knommu/cacheflush.h      +1   -1
include/asm-mips/cacheflush.h           +2   -3
include/asm-parisc/cacheflush.h         +3   -3
include/asm-ppc/cacheflush.h            +1   -1
include/asm-ppc64/cacheflush.h          +1   -1
include/asm-s390/cacheflush.h           +1   -1
include/asm-sh/cacheflush.h             +2   -2
include/asm-sh/cpu-sh2/cacheflush.h     +2   -2
include/asm-sh/cpu-sh3/cacheflush.h     +3   -3
include/asm-sh/cpu-sh4/cacheflush.h     +1   -1
include/asm-sh64/cacheflush.h           +3   -3
include/asm-sparc/cacheflush.h          +7   -7
include/asm-sparc64/cacheflush.h        +7   -7
include/asm-v850/cacheflush.h           +1   -1
include/asm-x86_64/cacheflush.h         +1   -1
mm/fremap.c                             +1   -1
mm/memory.c                             +2   -2
mm/rmap.c                               +2   -2
Documentation/cachetlb.txt
@@ -155,7 +155,7 @@ the sequence will be in one of the following forms:
    change_range_of_page_tables(mm, start, end);
    flush_tlb_range(vma, start, end);

-3) flush_cache_page(vma, addr);
+3) flush_cache_page(vma, addr, pfn);
    set_pte(pte_pointer, new_pte_val);
    flush_tlb_page(vma, addr);
@@ -203,7 +203,7 @@ Here are the routines, one by one:
    call flush_cache_page (see below) for each entry which may be
    modified.

-3) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
+3) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)

    This time we need to remove a PAGE_SIZE sized range
    from the cache.  The 'vma' is the backing structure used by
@@ -213,8 +213,14 @@ Here are the routines, one by one:
    executable (and thus could be in the 'instruction cache' in
    "Harvard" type cache layouts).

+   The 'pfn' indicates the physical page frame (shift this value
+   left by PAGE_SHIFT to get the physical address) that 'addr'
+   translates to.  It is this mapping which should be removed from
+   the cache.
+
    After running, there will be no entries in the cache for
-   'vma->vm_mm' for virtual address 'addr'.
+   'vma->vm_mm' for virtual address 'addr' which translates
+   to 'pfn'.

    This is used primarily during fault processing.
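The sequence documented above is the pattern the callers updated in this merge follow: obtain the page frame number for the mapping being modified (via pte_pfn() or page_to_pfn()), flush the cache for that virtual/physical pair, rewrite the PTE, then flush the TLB entry. Below is a minimal sketch of such a caller, assuming the new three-argument flush_cache_page(); the helper name change_one_pte() is invented here purely for illustration and is not part of this commit.

    /* Sketch only: change_one_pte() is a hypothetical helper that shows the
     * documented flush_cache_page() / set_pte() / flush_tlb_page() ordering. */
    static void change_one_pte(struct vm_area_struct *vma, unsigned long addr,
                               pte_t *ptep, pte_t new_pte_val)
    {
        unsigned long pfn = pte_pfn(*ptep);   /* frame currently mapped at addr */

        flush_cache_page(vma, addr, pfn);     /* drop cache lines for addr -> pfn */
        set_pte(ptep, new_pte_val);           /* install the new translation */
        flush_tlb_page(vma, addr);            /* invalidate the stale TLB entry */
    }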
arch/arm/mm/fault-armv.c
@@ -54,7 +54,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
     * fault (ie, is old), we can safely ignore any issues.
     */
    if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
-       flush_cache_page(vma, address);
+       flush_cache_page(vma, address, pte_pfn(entry));
        pte_val(entry) &= ~shared_pte_mask;
        set_pte(pte, entry);
        flush_tlb_page(vma, address);
@@ -115,7 +115,7 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
    if (aliases)
        adjust_pte(vma, addr);
    else
-       flush_cache_page(vma, addr);
+       flush_cache_page(vma, addr, page_to_pfn(page));
 }

 /*
arch/arm/mm/flush.c
@@ -56,7 +56,7 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page)
        if (!(mpnt->vm_flags & VM_MAYSHARE))
            continue;
        offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-       flush_cache_page(mpnt, mpnt->vm_start + offset);
+       flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
        if (cache_is_vipt())
            break;
    }
arch/mips/mm/c-r3k.c
@@ -254,8 +254,7 @@ static void r3k_flush_cache_range(struct vm_area_struct *vma,
 {
 }

-static void r3k_flush_cache_page(struct vm_area_struct *vma,
-   unsigned long page)
+static void r3k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
 {
 }
arch/mips/mm/c-r4k.c
@@ -426,8 +426,7 @@ static inline void local_r4k_flush_cache_page(void *args)
    }
 }

-static void r4k_flush_cache_page(struct vm_area_struct *vma,
-   unsigned long page)
+static void r4k_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
 {
    struct flush_cache_page_args args;
arch/mips/mm/c-sb1.c
@@ -160,8 +160,7 @@ static inline void __sb1_flush_icache_all(void)
  * dcache first, then invalidate the icache.  If the page isn't
  * executable, nothing is required.
  */
-static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
-   unsigned long addr)
+static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 {
    int cpu = smp_processor_id();
@@ -183,17 +182,18 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
 struct flush_cache_page_args {
    struct vm_area_struct *vma;
    unsigned long addr;
+   unsigned long pfn;
 };

 static void sb1_flush_cache_page_ipi(void *info)
 {
    struct flush_cache_page_args *args = info;

-   local_sb1_flush_cache_page(args->vma, args->addr);
+   local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
 }

 /* Dirty dcache could be on another CPU, so do the IPIs */
-static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
+static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 {
    struct flush_cache_page_args args;
@@ -203,10 +203,11 @@ static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
    addr &= PAGE_MASK;
    args.vma = vma;
    args.addr = addr;
+   args.pfn = pfn;
    on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
 }
 #else
-void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
+void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
    __attribute__((alias("local_sb1_flush_cache_page")));
 #endif
arch/mips/mm/c-tx39.c
@@ -178,8 +178,7 @@ static void tx39_flush_cache_range(struct vm_area_struct *vma,
    }
 }

-static void tx39_flush_cache_page(struct vm_area_struct *vma,
-   unsigned long page)
+static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
 {
    int exec = vma->vm_flags & VM_EXEC;
    struct mm_struct *mm = vma->vm_mm;
arch/mips/mm/cache.c
@@ -23,7 +23,7 @@ void (*__flush_cache_all)(void);
 void (*flush_cache_mm)(struct mm_struct *mm);
 void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
    unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
+void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
 void (*flush_icache_range)(unsigned long start, unsigned long end);
 void (*flush_icache_page)(struct vm_area_struct *vma, struct page *page);
arch/sh/mm/cache-sh4.c
@@ -258,10 +258,16 @@ void flush_cache_mm(struct mm_struct *mm)
    flush_cache_all();
 }

-static void __flush_cache_page(struct vm_area_struct *vma,
-   unsigned long address, unsigned long phys)
+/*
+ * Write back and invalidate I/D-caches for the page.
+ *
+ * ADDR: Virtual Address (U0 address)
+ * PFN: Physical page number
+ */
+void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
 {
+   unsigned long phys = pfn << PAGE_SHIFT;
+
    /* We only need to flush D-cache when we have alias */
    if ((address ^ phys) & CACHE_ALIAS) {
        /* Loop 4K of the D-cache */
@@ -341,32 +347,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
    flush_icache_all();
 }

-/*
- * Write back and invalidate I/D-caches for the page.
- *
- * ADDR: Virtual Address (U0 address)
- */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
-{
-   pgd_t *dir;
-   pmd_t *pmd;
-   pte_t *pte;
-   pte_t entry;
-   unsigned long phys;
-
-   dir = pgd_offset(vma->vm_mm, address);
-   pmd = pmd_offset(dir, address);
-   if (pmd_none(*pmd) || pmd_bad(*pmd))
-       return;
-   pte = pte_offset_kernel(pmd, address);
-   entry = *pte;
-   if (!(pte_val(entry) & _PAGE_PRESENT))
-       return;
-   phys = pte_val(entry) & PTE_PHYS_MASK;
-   __flush_cache_page(vma, address, phys);
-}
-
 /*
  * flush_icache_user_range
  * @vma: VMA of the process
@@ -377,6 +357,6 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
    unsigned long addr, int len)
 {
-   __flush_cache_page(vma, addr, PHYSADDR(page_address(page)));
+   flush_cache_page(vma, addr, page_to_pfn(page));
 }
arch/sh/mm/cache-sh7705.c
@@ -186,25 +186,9 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * ADDRESS: Virtual Address (U0 address)
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
 {
-   pgd_t *dir;
-   pmd_t *pmd;
-   pte_t *pte;
-   pte_t entry;
-   unsigned long phys;
-
-   dir = pgd_offset(vma->vm_mm, address);
-   pmd = pmd_offset(dir, address);
-   if (pmd_none(*pmd) || pmd_bad(*pmd))
-       return;
-   pte = pte_offset(pmd, address);
-   entry = *pte;
-   if (pte_none(entry) || !pte_present(entry))
-       return;
-   phys = pte_val(entry) & PTE_PHYS_MASK;
-   __flush_dcache_page(phys);
+   __flush_dcache_page(pfn << PAGE_SHIFT);
 }

 /*
arch/sh64/mm/cache.c
@@ -573,31 +573,6 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
    }
 }

-static void sh64_dcache_purge_virt_page(struct mm_struct *mm, unsigned long eaddr)
-{
-   unsigned long phys;
-   pgd_t *pgd;
-   pmd_t *pmd;
-   pte_t *pte;
-   pte_t entry;
-
-   pgd = pgd_offset(mm, eaddr);
-   pmd = pmd_offset(pgd, eaddr);
-   if (pmd_none(*pmd) || pmd_bad(*pmd))
-       return;
-   pte = pte_offset_kernel(pmd, eaddr);
-   entry = *pte;
-   if (pte_none(entry) || !pte_present(entry))
-       return;
-   phys = pte_val(entry) & PAGE_MASK;
-   sh64_dcache_purge_phy_page(phys);
-}
-
 static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
 {
    pgd_t *pgd;
@@ -904,7 +879,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 /****************************************************************************/

-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
 {
    /* Invalidate any entries in either cache for the vma within the user
       address space vma->vm_mm for the page starting at virtual address
@@ -915,7 +890,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr)
       Note(1), this is called with mm->page_table_lock held.
       */

-   sh64_dcache_purge_virt_page(vma->vm_mm, eaddr);
+   sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

    if (vma->vm_flags & VM_EXEC) {
        sh64_icache_inv_user_page(vma, eaddr);
arch/sparc/mm/srmmu.c
@@ -1003,8 +1003,7 @@ extern void viking_flush_cache_all(void);
 extern void viking_flush_cache_mm(struct mm_struct *mm);
 extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
    unsigned long end);
-extern void viking_flush_cache_page(struct vm_area_struct *vma,
-   unsigned long page);
+extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
 extern void viking_flush_page_to_ram(unsigned long page);
 extern void viking_flush_page_for_dma(unsigned long page);
 extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
fs/binfmt_elf.c
@@ -1603,7 +1603,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
                DUMP_SEEK(file->f_pos + PAGE_SIZE);
            } else {
                void *kaddr;
-               flush_cache_page(vma, addr);
+               flush_cache_page(vma, addr, page_to_pfn(page));
                kaddr = kmap(page);
                if ((size += PAGE_SIZE) > limit ||
                    !dump_write(file, kaddr,
include/asm-alpha/cacheflush.h
@@ -8,7 +8,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
include/asm-arm/cacheflush.h
@@ -237,16 +237,16 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  * space" model to handle this.
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-   do { \
-       flush_cache_page(vma, vaddr); \
-       memcpy(dst, src, len); \
-       flush_dcache_page(page); \
+   do {                                                    \
+       flush_cache_page(vma, vaddr, page_to_pfn(page));    \
+       memcpy(dst, src, len);                              \
+       flush_dcache_page(page);                            \
    } while (0)

 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-   do { \
-       flush_cache_page(vma, vaddr); \
-       memcpy(dst, src, len); \
+   do {                                                    \
+       flush_cache_page(vma, vaddr, page_to_pfn(page));    \
+       memcpy(dst, src, len);                              \
    } while (0)

 /*
@@ -269,7 +269,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 }

 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
+flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
    if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
        unsigned long addr = user_addr & PAGE_MASK;
include/asm-arm26/cacheflush.h
@@ -23,7 +23,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma,start,end)        do { } while (0)
-#define flush_cache_page(vma,vmaddr)            do { } while (0)
+#define flush_cache_page(vma,vmaddr,pfn)        do { } while (0)
 #define flush_cache_vmap(start, end)            do { } while (0)
 #define flush_cache_vunmap(start, end)          do { } while (0)
include/asm-cris/cacheflush.h
@@ -10,7 +10,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
include/asm-frv/cacheflush.h
@@ -21,7 +21,7 @@
 #define flush_cache_all()                       do {} while(0)
 #define flush_cache_mm(mm)                      do {} while(0)
 #define flush_cache_range(mm, start, end)       do {} while(0)
-#define flush_cache_page(vma, vmaddr)           do {} while(0)
+#define flush_cache_page(vma, vmaddr, pfn)      do {} while(0)
 #define flush_cache_vmap(start, end)            do {} while(0)
 #define flush_cache_vunmap(start, end)          do {} while(0)
 #define flush_dcache_mmap_lock(mapping)         do {} while(0)
include/asm-h8300/cacheflush.h
@@ -13,7 +13,7 @@
 #define flush_cache_all()
 #define flush_cache_mm(mm)
 #define flush_cache_range(vma,a,b)
-#define flush_cache_page(vma,p)
+#define flush_cache_page(vma,p,pfn)
 #define flush_dcache_page(page)
 #define flush_dcache_mmap_lock(mapping)
 #define flush_dcache_mmap_unlock(mapping)
include/asm-i386/cacheflush.h
@@ -8,7 +8,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
include/asm-ia64/cacheflush.h
@@ -19,7 +19,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_icache_page(vma,page)             do { } while (0)
 #define flush_cache_vmap(start, end)            do { } while (0)
 #define flush_cache_vunmap(start, end)          do { } while (0)
include/asm-m32r/cacheflush.h
@@ -11,7 +11,7 @@ extern void _flush_cache_copyback_all(void);
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
@@ -31,7 +31,7 @@ extern void smp_flush_cache_all(void);
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
@@ -43,7 +43,7 @@ extern void smp_flush_cache_all(void);
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
include/asm-m68k/cacheflush.h
@@ -99,8 +99,7 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
        __flush_cache_030();
 }

-static inline void flush_cache_page(struct vm_area_struct *vma,
-   unsigned long vmaddr)
+static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
    if (vma->vm_mm == current->mm)
        __flush_cache_030();
@@ -134,15 +133,15 @@ static inline void __flush_page_to_ram(void *vaddr)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-   do { \
-       flush_cache_page(vma, vaddr); \
-       memcpy(dst, src, len); \
+   do {                                                    \
+       flush_cache_page(vma, vaddr, page_to_pfn(page));    \
+       memcpy(dst, src, len);                              \
    } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-   do { \
-       flush_cache_page(vma, vaddr); \
-       memcpy(dst, src, len); \
+   do {                                                    \
+       flush_cache_page(vma, vaddr, page_to_pfn(page));    \
+       memcpy(dst, src, len);                              \
    } while (0)

 extern void flush_icache_range(unsigned long address, unsigned long endaddr);
include/asm-m68knommu/cacheflush.h
@@ -9,7 +9,7 @@
 #define flush_cache_all()                       __flush_cache_all()
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_range(start,len)           do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
include/asm-mips/cacheflush.h
@@ -17,7 +17,7 @@
  *
  * - flush_cache_all() flushes entire cache
  * - flush_cache_mm(mm) flushes the specified mm context's cache lines
- * - flush_cache_page(mm, vmaddr) flushes a single page
+ * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  * - flush_cache_range(vma, start, end) flushes a range of pages
  * - flush_icache_range(start, end) flush a range of instructions
  * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
@@ -34,8 +34,7 @@ extern void (*__flush_cache_all)(void);
 extern void (*flush_cache_mm)(struct mm_struct *mm);
 extern void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
    unsigned long end);
-extern void (*flush_cache_page)(struct vm_area_struct *vma,
-   unsigned long page);
+extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
 extern void __flush_dcache_page(struct page *page);

 static inline void flush_dcache_page(struct page *page)
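On MIPS the flush primitives are function pointers selected at boot for the detected cache type (see arch/mips/mm/cache.c above), so every implementation has to adopt the same three-argument prototype. A rough sketch of how an implementation gets wired in follows; the setup_cache_ops() hook is hypothetical and stands in for the real per-CPU probe code (e.g. in arch/mips/mm/c-r4k.c), which this commit only touches for the signature change.

    /* Sketch only: illustrative wiring of the indirected MIPS cache ops;
     * the real assignments live in the per-cache setup code. */
    void (*flush_cache_page)(struct vm_area_struct *vma,
                             unsigned long page, unsigned long pfn);

    static void r4k_flush_cache_page(struct vm_area_struct *vma,
                                     unsigned long page, unsigned long pfn)
    {
        /* per-CPU flush as in arch/mips/mm/c-r4k.c */
    }

    static void __init setup_cache_ops(void)
    {
        /* Every assignment must now match the pfn-taking prototype. */
        flush_cache_page = r4k_flush_cache_page;
    }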
include/asm-parisc/cacheflush.h
@@ -67,14 +67,14 @@ extern void flush_dcache_page(struct page *page);
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-   flush_cache_page(vma, vaddr); \
+   flush_cache_page(vma, vaddr, page_to_pfn(page)); \
    memcpy(dst, src, len); \
    flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
 } while (0)

 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
-   flush_cache_page(vma, vaddr); \
+   flush_cache_page(vma, vaddr, page_to_pfn(page)); \
    memcpy(dst, src, len); \
 } while (0)
@@ -170,7 +170,7 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
 }

 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
    BUG_ON(!vma->vm_mm->context);
include/asm-ppc/cacheflush.h
@@ -22,7 +22,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, a, b)            do { } while (0)
-#define flush_cache_page(vma, p)                do { } while (0)
+#define flush_cache_page(vma, p, pfn)           do { } while (0)
 #define flush_icache_page(vma, page)            do { } while (0)
 #define flush_cache_vmap(start, end)            do { } while (0)
 #define flush_cache_vunmap(start, end)          do { } while (0)
include/asm-ppc64/cacheflush.h
@@ -12,7 +12,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_icache_page(vma, page)            do { } while (0)
 #define flush_cache_vmap(start, end)            do { } while (0)
 #define flush_cache_vunmap(start, end)          do { } while (0)
include/asm-s390/cacheflush.h
@@ -8,7 +8,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
include/asm-sh/cacheflush.h
@@ -15,14 +15,14 @@ extern void __flush_invalidate_region(void *start, int size);
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
    do { \
-       flush_cache_page(vma, vaddr); \
+       flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
        flush_icache_user_range(vma, page, vaddr, len); \
    } while (0)

 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
    do { \
-       flush_cache_page(vma, vaddr); \
+       flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
    } while (0)
include/asm-sh/cpu-sh2/cacheflush.h
@@ -15,7 +15,7 @@
  *
  * - flush_cache_all() flushes entire cache
  * - flush_cache_mm(mm) flushes the specified mm context's cache lines
- * - flush_cache_page(mm, vmaddr) flushes a single page
+ * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  * - flush_cache_range(vma, start, end) flushes a range of pages
  *
  * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
@@ -28,7 +28,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
include/asm-sh/cpu-sh3/cacheflush.h
@@ -15,7 +15,7 @@
  *
  * - flush_cache_all() flushes entire cache
  * - flush_cache_mm(mm) flushes the specified mm context's cache lines
- * - flush_cache_page(mm, vmaddr) flushes a single page
+ * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  * - flush_cache_range(vma, start, end) flushes a range of pages
  *
  * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
@@ -43,7 +43,7 @@ extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
    unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
 extern void flush_dcache_page(struct page *pg);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
@@ -68,7 +68,7 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
include/asm-sh/cpu-sh4/cacheflush.h
@@ -28,7 +28,7 @@ extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
    unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
 extern void flush_dcache_page(struct page *pg);

 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
include/asm-sh64/cacheflush.h
@@ -14,7 +14,7 @@ extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
    unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
 extern void flush_dcache_page(struct page *pg);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
@@ -31,14 +31,14 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
    do { \
-       flush_cache_page(vma, vaddr); \
+       flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
        flush_icache_user_range(vma, page, vaddr, len); \
    } while (0)

 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
    do { \
-       flush_cache_page(vma, vaddr); \
+       flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
    } while (0)
include/asm-sparc/cacheflush.h
@@ -50,21 +50,21 @@ BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
 #define flush_cache_all()               BTFIXUP_CALL(flush_cache_all)()
 #define flush_cache_mm(mm)              BTFIXUP_CALL(flush_cache_mm)(mm)
 #define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
-#define flush_cache_page(vma,addr)      BTFIXUP_CALL(flush_cache_page)(vma,addr)
+#define flush_cache_page(vma,addr,pfn)  BTFIXUP_CALL(flush_cache_page)(vma,addr)
 #define flush_icache_range(start, end)  do { } while (0)
 #define flush_icache_page(vma, pg)      do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-   do { \
-       flush_cache_page(vma, vaddr); \
-       memcpy(dst, src, len); \
+   do {                                                    \
+       flush_cache_page(vma, vaddr, page_to_pfn(page));    \
+       memcpy(dst, src, len);                              \
    } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-   do { \
-       flush_cache_page(vma, vaddr); \
-       memcpy(dst, src, len); \
+   do {                                                    \
+       flush_cache_page(vma, vaddr, page_to_pfn(page));    \
+       memcpy(dst, src, len);                              \
    } while (0)

 BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
include/asm-sparc64/cacheflush.h
@@ -11,7 +11,7 @@
    do { if ((__mm) == current->mm) flushw_user(); } while(0)
 #define flush_cache_range(vma, start, end) \
    flush_cache_mm((vma)->vm_mm)
-#define flush_cache_page(vma, page) \
+#define flush_cache_page(vma, page, pfn) \
    flush_cache_mm((vma)->vm_mm)

 /*
@@ -38,15 +38,15 @@ extern void __flush_dcache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-   do { \
-       flush_cache_page(vma, vaddr); \
-       memcpy(dst, src, len); \
+   do {                                                    \
+       flush_cache_page(vma, vaddr, page_to_pfn(page));    \
+       memcpy(dst, src, len);                              \
    } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-   do { \
-       flush_cache_page(vma, vaddr); \
-       memcpy(dst, src, len); \
+   do {                                                    \
+       flush_cache_page(vma, vaddr, page_to_pfn(page));    \
+       memcpy(dst, src, len);                              \
    } while (0)

 extern void flush_dcache_page(struct page *page);
include/asm-v850/cacheflush.h
@@ -25,7 +25,7 @@
 #define flush_cache_all()                       ((void)0)
 #define flush_cache_mm(mm)                      ((void)0)
 #define flush_cache_range(vma, start, end)      ((void)0)
-#define flush_cache_page(vma, vmaddr)           ((void)0)
+#define flush_cache_page(vma, vmaddr, pfn)      ((void)0)
 #define flush_dcache_page(page)                 ((void)0)
 #define flush_dcache_mmap_lock(mapping)         ((void)0)
 #define flush_dcache_mmap_unlock(mapping)       ((void)0)
include/asm-x86_64/cacheflush.h
@@ -8,7 +8,7 @@
 #define flush_cache_all()                       do { } while (0)
 #define flush_cache_mm(mm)                      do { } while (0)
 #define flush_cache_range(vma, start, end)      do { } while (0)
-#define flush_cache_page(vma, vmaddr)           do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)      do { } while (0)
 #define flush_dcache_page(page)                 do { } while (0)
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
mm/fremap.c
@@ -30,7 +30,7 @@ static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
    if (pte_present(pte)) {
        unsigned long pfn = pte_pfn(pte);

-       flush_cache_page(vma, addr);
+       flush_cache_page(vma, addr, pfn);
        pte = ptep_clear_flush(vma, addr, ptep);
        if (pfn_valid(pfn)) {
            struct page *page = pfn_to_page(pfn);
mm/memory.c
@@ -1250,7 +1250,6 @@ static inline void break_cow(struct vm_area_struct * vma, struct page * new_page
 {
    pte_t entry;

-   flush_cache_page(vma, address);
    entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)), vma);
    ptep_establish(vma, address, page_table, entry);
@@ -1302,7 +1301,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
        int reuse = can_share_swap_page(old_page);
        unlock_page(old_page);
        if (reuse) {
-           flush_cache_page(vma, address);
+           flush_cache_page(vma, address, pfn);
            entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)), vma);
            ptep_set_access_flags(vma, address, page_table, entry, 1);
@@ -1345,6 +1344,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
            ++mm->rss;
        else
            page_remove_rmap(old_page);
+       flush_cache_page(vma, address, pfn);
        break_cow(vma, new_page, address, page_table);
        lru_cache_add_active(new_page);
        page_add_anon_rmap(new_page, vma, address);
mm/rmap.c
@@ -573,7 +573,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
    }

    /* Nuke the page table entry. */
-   flush_cache_page(vma, address);
+   flush_cache_page(vma, address, page_to_pfn(page));
    pteval = ptep_clear_flush(vma, address, pte);

    /* Move the dirty bit to the physical page now the pte is gone. */
@@ -690,7 +690,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
            continue;

        /* Nuke the page table entry. */
-       flush_cache_page(vma, address);
+       flush_cache_page(vma, address, pfn);
        pteval = ptep_clear_flush(vma, address, pte);

        /* If nonlinear, store the file page offset in the pte. */