Commit 40574c27 authored Apr 10, 2002 by David Mosberger
ia64: Add asm/cacheflush.h and asm/tlbflush.h.
parent 6db48033
Showing 5 changed files with 149 additions and 142 deletions:

arch/ia64/kernel/pci.c           +1   -1
include/asm-ia64/cacheflush.h    +39  -0
include/asm-ia64/pgalloc.h       +0   -90
include/asm-ia64/pgtable.h       +26  -51
include/asm-ia64/tlbflush.h      +83  -0
arch/ia64/kernel/pci.c
@@ -42,7 +42,7 @@
 extern void ia64_mca_check_errors (void);
 #endif

-struct pci_fixup pcibios_fixups[];
+struct pci_fixup pcibios_fixups[1];

 struct pci_ops *pci_root_ops;
include/asm-ia64/cacheflush.h
new file mode 100644
#ifndef _ASM_IA64_CACHEFLUSH_H
#define _ASM_IA64_CACHEFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/mm.h>

#include <asm/bitops.h>
#include <asm/page.h>

/*
 * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
 * to avoid them whenever possible.
 */

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)
#define flush_icache_page(vma,page)		do { } while (0)

#define flush_dcache_page(page)			\
do {						\
	clear_bit(PG_arch_1, &page->flags);	\
} while (0)

extern void flush_icache_range (unsigned long start, unsigned long end);

#define flush_icache_user_range(vma, page, user_addr, len)				\
do {											\
	unsigned long _addr = page_address(page) + ((user_addr) & ~PAGE_MASK);		\
	flush_icache_range(_addr, _addr + (len));					\
} while (0)

#endif /* _ASM_IA64_CACHEFLUSH_H */
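Not part of the commit, but useful context: a minimal sketch of how the lazy i-cache coherence scheme in this header is meant to be driven. flush_dcache_page() only clears PG_arch_1 to record that the page may be stale in the i-cache; the actual flush is deferred to update_mmu_cache() (added to pgtable.h below), which flushes and sets the bit again once the page is mapped with an executable pte. The function name and caller below are hypothetical.

/* Hypothetical illustration only -- not from this commit. */
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

static void example_copy_user_insns (struct page *page, const void *insns, int len)
{
	/* write instructions through the kernel mapping of the page */
	memcpy((void *) page_address(page), insns, len);

	/* record that the i-cache may now be stale for this page ... */
	flush_dcache_page(page);

	/*
	 * ... but do not flush here: update_mmu_cache() performs the
	 * flush_icache_range() later, when the page is actually mapped
	 * with an executable pte, and then sets PG_arch_1 again.
	 */
}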
include/asm-ia64/pgalloc.h
@@ -152,94 +152,4 @@ pte_free_kernel (pte_t *pte)

 extern int do_check_pgt_cache (int, int);

-/*
- * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
- * information.  However, we use this macro to take care of any (delayed) i-cache flushing
- * that may be necessary.
- */
-static inline void
-update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
-{
-	unsigned long addr;
-	struct page *page;
-
-	if (!pte_exec(pte))
-		return;				/* not an executable page... */
-
-	page = pte_page(pte);
-	/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
-	addr = (unsigned long) page_address(page);
-
-	if (test_bit(PG_arch_1, &page->flags))
-		return;				/* i-cache is already coherent with d-cache */
-
-	flush_icache_range(addr, addr + PAGE_SIZE);
-	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
-}
-
-/*
- * Now for some TLB flushing routines.  This is the kind of stuff that
- * can be very expensive, so try to avoid them whenever possible.
- */
-
-/*
- * Flush everything (kernel mapping may also have changed due to
- * vmalloc/vfree).
- */
-extern void __flush_tlb_all (void);
-
-#ifdef CONFIG_SMP
-  extern void smp_flush_tlb_all (void);
-# define flush_tlb_all()	smp_flush_tlb_all()
-#else
-# define flush_tlb_all()	__flush_tlb_all()
-#endif
-
-/*
- * Flush a specified user mapping
- */
-static inline void
-flush_tlb_mm (struct mm_struct *mm)
-{
-	if (mm) {
-		mm->context = 0;
-		if (mm == current->active_mm) {
-			/* This is called, e.g., as a result of exec().  */
-			get_new_mmu_context(mm);
-			reload_context(mm);
-		}
-	}
-}
-
-extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
-
-/*
- * Page-granular tlb flush.
- */
-static inline void
-flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
-{
-#ifdef CONFIG_SMP
-	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
-#else
-	if (vma->vm_mm == current->active_mm)
-		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
-#endif
-}
-
-/*
- * Flush the TLB entries mapping the virtually mapped linear page
- * table corresponding to address range [START-END).
- */
-static inline void
-flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	struct vm_area_struct vma;
-
-	if (rgn_index(start) != rgn_index(end))
-		printk("flush_tlb_pgtables: can't flush across regions!!\n");
-	vma.vm_mm = mm;
-	flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
-}
-
 #endif /* _ASM_IA64_PGALLOC_H */
include/asm-ia64/pgtable.h
@@ -14,6 +14,7 @@
 #include <linux/config.h>

+#include <asm/cacheflush.h>
 #include <asm/mman.h>
 #include <asm/page.h>
 #include <asm/processor.h>

@@ -290,30 +291,6 @@ ia64_phys_addr_valid (unsigned long addr)
 # define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
 #endif

-/*
- * Return the region index for virtual address ADDRESS.
- */
-static inline unsigned long
-rgn_index (unsigned long address)
-{
-	ia64_va a;
-
-	a.l = address;
-	return a.f.reg;
-}
-
-/*
- * Return the region offset for virtual address ADDRESS.
- */
-static inline unsigned long
-rgn_offset (unsigned long address)
-{
-	ia64_va a;
-
-	a.l = address;
-	return a.f.off;
-}
-
 static inline unsigned long
 pgd_index (unsigned long address)
 {

@@ -429,6 +406,31 @@ pte_same (pte_t a, pte_t b)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init (void);

+/*
+ * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
+ * information.  However, we use this macro to take care of any (delayed) i-cache flushing
+ * that may be necessary.
+ */
+static inline void
+update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
+{
+	unsigned long addr;
+	struct page *page;
+
+	if (!pte_exec(pte))
+		return;				/* not an executable page... */
+
+	page = pte_page(pte);
+	/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
+	addr = (unsigned long) page_address(page);
+
+	if (test_bit(PG_arch_1, &page->flags))
+		return;				/* i-cache is already coherent with d-cache */
+
+	flush_icache_range(addr, addr + PAGE_SIZE);
+	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
+}
+
 #define SWP_TYPE(entry)			(((entry).val >> 1) & 0xff)
 #define SWP_OFFSET(entry)		(((entry).val << 1) >> 10)
 #define SWP_ENTRY(type,offset)		((swp_entry_t) { ((type) << 1) | ((long) (offset) << 9) })

@@ -440,33 +442,6 @@ extern void paging_init (void);
 #define io_remap_page_range remap_page_range	/* XXX is this right? */

-/*
- * Now for some cache flushing routines.  This is the kind of stuff that can be very
- * expensive, so try to avoid them whenever possible.
- */
-
-/* Caches aren't brain-dead on the IA-64. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
-#define flush_page_to_ram(page)		do { } while (0)
-#define flush_icache_page(vma,page)		do { } while (0)
-
-#define flush_dcache_page(page)			\
-do {						\
-	clear_bit(PG_arch_1, &page->flags);	\
-} while (0)
-
-extern void flush_icache_range (unsigned long start, unsigned long end);
-
-#define flush_icache_user_range(vma, page, user_addr, len)				\
-do {										\
-	unsigned long _addr = page_address(page) + ((user_addr) & ~PAGE_MASK);	\
-	flush_icache_range(_addr, _addr + (len));				\
-} while (0)
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
include/asm-ia64/tlbflush.h
new file mode 100644
#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/page.h>

/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void __flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	__flush_tlb_all()
#endif

/*
 * Flush a specified user mapping
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (mm) {
		mm->context = 0;
		if (mm == current->active_mm) {
			/* This is called, e.g., as a result of exec().  */
			get_new_mmu_context(mm);
			reload_context(mm);
		}
	}
}

extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
#endif
}

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct vm_area_struct vma;

	if (REGION_NUMBER(start) != REGION_NUMBER(end))
		printk("flush_tlb_pgtables: can't flush across regions!!\n");
	vma.vm_mm = mm;
	flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
}

#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */

#endif /* _ASM_IA64_TLBFLUSH_H */
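Again purely illustrative (not part of the commit): a sketch of how a caller would typically pick among the interfaces declared above after editing page table entries for a user range. The helper name and its parameters are made up for illustration.

/* Hypothetical illustration only -- not from this commit. */
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

static void example_flush_after_pte_change (struct vm_area_struct *vma,
					     unsigned long start, unsigned long end)
{
	if (end - start <= PAGE_SIZE)
		/* a single page: the cheapest, page-granular flush */
		flush_tlb_page(vma, start);
	else
		/* a larger span: flush the whole range in one operation */
		flush_tlb_range(vma, start, end);
}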