Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
nexedi
linux
Commits
22f60da7
Commit
22f60da7
authored
Jul 11, 2002
by
David Mosberger
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
ia64: TLB flushing fixes and reserve large-page syscall numbers.
parent
b2cad830
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
41 additions
and
10 deletions
+41
-10
arch/ia64/mm/tlb.c
arch/ia64/mm/tlb.c
+6
-1
include/asm-ia64/mmu_context.h
include/asm-ia64/mmu_context.h
+24
-5
include/asm-ia64/tlb.h
include/asm-ia64/tlb.h
+7
-4
include/asm-ia64/unistd.h
include/asm-ia64/unistd.h
+4
-0
No files found.
arch/ia64/mm/tlb.c
View file @ 22f60da7

@@ -49,6 +49,7 @@ wrap_mmu_context (struct mm_struct *mm)
 {
 	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
 	struct task_struct *tsk;
+	int i;
 	if (ia64_ctx.next > max_ctx)
 		ia64_ctx.next = 300;	/* skip daemons */
@@ -77,7 +78,11 @@ wrap_mmu_context (struct mm_struct *mm)
 			ia64_ctx.limit = tsk_context;
 	}
 	read_unlock(&tasklist_lock);
-	flush_tlb_all();
+	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
+	for (i = 0; i < smp_num_cpus; ++i)
+		if (i != smp_processor_id())
+			per_cpu(ia64_need_tlb_flush, i) = 1;
+	__flush_tlb_all();
 }
 void
include/asm-ia64/mmu_context.h
View file @ 22f60da7

@@ -2,8 +2,8 @@
 #define _ASM_IA64_MMU_CONTEXT_H
 /*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
 /*
@@ -13,8 +13,6 @@
 * consider the region number when performing a TLB lookup, we need to assign a unique
 * region id to each region in a process. We use the least significant three bits in a
 * region id for this purpose.
- *
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
 */
 #define IA64_REGION_ID_KERNEL 0	/* the kernel's region id (tlb.c depends on this being 0) */
@@ -23,6 +21,8 @@
 # ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
@@ -36,6 +36,7 @@ struct ia64_ctx {
 };
 extern struct ia64_ctx ia64_ctx;
+extern u8 ia64_need_tlb_flush __per_cpu_data;
 extern void wrap_mmu_context (struct mm_struct *mm);
@@ -44,9 +45,28 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
 {
 }
+/*
+ * When the context counter wraps around all TLBs need to be flushed because an old
+ * context number might have been reused. This is signalled by the ia64_need_tlb_flush
+ * per-CPU variable, which is checked in the routine below. Called by activate_mm().
+ * <efocht@ess.nec.de>
+ */
+static inline void
+delayed_tlb_flush (void)
+{
+	extern void __flush_tlb_all (void);
+
+	if (unlikely(ia64_need_tlb_flush)) {
+		__flush_tlb_all();
+		__ia64_need_tlb_flush = 0;
+	}
+}
 static inline void
 get_new_mmu_context (struct mm_struct *mm)
 {
+	delayed_tlb_flush();
+
 	spin_lock(&ia64_ctx.lock);
 	{
 		if (ia64_ctx.next >= ia64_ctx.limit)
@@ -54,7 +74,6 @@ get_new_mmu_context (struct mm_struct *mm)
 		mm->context = ia64_ctx.next++;
 	}
 	spin_unlock(&ia64_ctx.lock);
 }
 static inline void
include/asm-ia64/tlb.h
View file @ 22f60da7

@@ -72,12 +72,15 @@ ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
 {
 	unsigned long nr;
-	if (end - start >= 1024*1024*1024*1024UL) {
+	if (unlikely(end - start >= 1024*1024*1024*1024UL
+		     || rgn_index(start) != rgn_index(end - 1))) {
 		/*
-		 * If we flush more than a tera-byte, we're probably better off just
-		 * flushing the entire address space.
+		 * If we flush more than a tera-byte or across regions, we're probably
+		 * better off just flushing the entire TLB(s). This should be very rare
+		 * and is not worth optimizing for.
 		 */
-		flush_tlb_mm(tlb->mm);
+		flush_tlb_all();
 	} else {
 		/*
 		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
include/asm-ia64/unistd.h
View file @ 22f60da7

@@ -223,6 +223,10 @@
 #define __NR_sched_setaffinity 1231
 #define __NR_sched_getaffinity 1232
 #define __NR_security 1233
+#define __NR_get_large_pages 1234
+#define __NR_free_large_pages 1235
+#define __NR_share_large_pages 1236
+#define __NR_unshare_large_pages 1237
 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment