Kirill Smelkov / linux / Commits
Commit 9658e7b7 authored Feb 27, 2004 by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5
into ppc970.osdl.org:/home/torvalds/v2.5/linux

parents 26d5aa5a e455002a

Showing 16 changed files with 322 additions and 298 deletions
arch/ia64/Kconfig                        +0    -33
arch/ia64/kernel/head.S                  +13   -0
arch/ia64/kernel/iosapic.c               +8    -8
arch/ia64/kernel/irq.c                   +0    -2
arch/ia64/kernel/irq_ia64.c              +2    -2
arch/ia64/kernel/ivt.S                   +3    -2
arch/ia64/kernel/perfmon.c               +180  -177
arch/ia64/kernel/perfmon_default_smpl.c  +1    -0
arch/ia64/kernel/process.c               +49   -37
arch/ia64/mm/hugetlbpage.c               +37   -5
arch/ia64/mm/init.c                      +4    -0
include/asm-ia64/iosapic.h               +2    -2
include/asm-ia64/mmu_context.h           +3    -2
include/asm-ia64/page.h                  +8    -24
include/asm-ia64/perfmon_default_smpl.h  +2    -2
include/asm-ia64/scatterlist.h           +10   -2
arch/ia64/Kconfig
@@ -288,39 +288,6 @@ config FORCE_MAX_ZONEORDER
 	int
 	default "18"
 
-choice
-	prompt "Huge TLB page size"
-	depends on HUGETLB_PAGE
-	default HUGETLB_PAGE_SIZE_16MB
-
-config HUGETLB_PAGE_SIZE_4GB
-	depends on MCKINLEY
-	bool "4GB"
-
-config HUGETLB_PAGE_SIZE_1GB
-	depends on MCKINLEY
-	bool "1GB"
-
-config HUGETLB_PAGE_SIZE_256MB
-	bool "256MB"
-
-config HUGETLB_PAGE_SIZE_64MB
-	bool "64MB"
-
-config HUGETLB_PAGE_SIZE_16MB
-	bool "16MB"
-
-config HUGETLB_PAGE_SIZE_4MB
-	bool "4MB"
-
-config HUGETLB_PAGE_SIZE_1MB
-	bool "1MB"
-
-config HUGETLB_PAGE_SIZE_256KB
-	bool "256KB"
-
-endchoice
-
 config IA64_PAL_IDLE
 	bool "Use PAL_HALT_LIGHT in idle loop"
 	help
...
arch/ia64/kernel/head.S
@@ -816,6 +816,19 @@ GLOBAL_ENTRY(ia64_delay_loop)
 	br.ret.sptk.many rp
 END(ia64_delay_loop)
 
+GLOBAL_ENTRY(ia64_invoke_kernel_thread_helper)
+	.prologue
+	.save rp, r0				// this is the end of the call-chain
+	.body
+	alloc r2 = ar.pfs, 0, 0, 2, 0
+	mov out0 = r9
+	mov out1 = r11;;
+	br.call.sptk.many rp = kernel_thread_helper;;
+	mov out0 = r8
+	br.call.sptk.many rp = sys_exit;;
+1:	br.sptk.few 1b				// not reached
+END(ia64_invoke_kernel_thread_helper)
+
 #ifdef CONFIG_IA64_BRL_EMU
 
 /*
...
arch/ia64/kernel/iosapic.c
@@ -103,6 +103,7 @@ static spinlock_t iosapic_lock = SPIN_LOCK_UNLOCKED;
 static struct iosapic_intr_info {
 	char		*addr;		/* base address of IOSAPIC */
+	u32		low32;		/* current value of low word of Redirection table entry */
 	unsigned int	gsi_base;	/* first GSI assigned to this IOSAPIC */
 	char		rte_index;	/* IOSAPIC RTE index (-1 => not an IOSAPIC interrupt) */
 	unsigned char	dmode	: 3;	/* delivery mode (see iosapic.h) */
@@ -213,6 +214,7 @@ set_rte (unsigned int vector, unsigned int dest)
 	writel(high32, addr + IOSAPIC_WINDOW);
 	writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
 	writel(low32, addr + IOSAPIC_WINDOW);
+	iosapic_intr_info[vector].low32 = low32;
 }
 
 static void
@@ -239,9 +241,10 @@ mask_irq (unsigned int irq)
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
 		writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
-		low32 = readl(addr + IOSAPIC_WINDOW);
-		low32 |= (1 << IOSAPIC_MASK_SHIFT);	/* set only the mask bit */
+		/* set only the mask bit */
+		low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
 		writel(low32, addr + IOSAPIC_WINDOW);
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
@@ -264,9 +267,7 @@ unmask_irq (unsigned int irq)
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
 		writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
-		low32 = readl(addr + IOSAPIC_WINDOW);
-		low32 &= ~(1 << IOSAPIC_MASK_SHIFT);	/* clear only the mask bit */
+		low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
 		writel(low32, addr + IOSAPIC_WINDOW);
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
@@ -307,9 +308,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	{
 		/* get current delivery mode by reading the low32 */
 		writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
-		low32 = readl(addr + IOSAPIC_WINDOW);
-		low32 &= ~(7 << IOSAPIC_DELIVERY_SHIFT);
+		low32 = iosapic_intr_info[vec].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
 		if (redir)
 			/* change delivery mode to lowest priority */
 			low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
@@ -317,6 +316,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 			/* change delivery mode to fixed */
 			low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
 
+		iosapic_intr_info[vec].low32 = low32;
 		writel(IOSAPIC_RTE_HIGH(rte_index), addr + IOSAPIC_REG_SELECT);
 		writel(high32, addr + IOSAPIC_WINDOW);
 		writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
...
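A note on the pattern in the iosapic.c hunks above: instead of reading a redirection-table entry back through the window register before modifying it, the driver now keeps a software copy in iosapic_intr_info[].low32 and derives the new value from that copy (the mask/unmask paths update it under iosapic_lock). A minimal stand-alone sketch of the same cache-then-write-through idea, with simulated registers and names of my own choosing, not the kernel code itself:

        #include <stdint.h>
        #include <stdio.h>

        #define MASK_BIT (1u << 16)            /* plays the role of IOSAPIC_MASK */

        static uint32_t fake_rte_low[16];      /* stands in for the device's RTE low words */

        static struct { uint32_t low32; } info[16];   /* software copy, like iosapic_intr_info[] */

        static void write_low(int vec, uint32_t v)    /* stands in for writel() to the window */
        {
                fake_rte_low[vec] = v;
        }

        static void mask_vec(int vec)
        {
                /* update the cached copy, then write it through; no read-back needed */
                uint32_t low32 = info[vec].low32 |= MASK_BIT;
                write_low(vec, low32);
        }

        static void unmask_vec(int vec)
        {
                uint32_t low32 = info[vec].low32 &= ~MASK_BIT;
                write_low(vec, low32);
        }

        int main(void)
        {
                info[3].low32 = 0x0000a0d1;    /* arbitrary initial RTE value */
                write_low(3, info[3].low32);
                mask_vec(3);
                printf("masked:   %#x\n", fake_rte_low[3]);
                unmask_vec(3);
                printf("unmasked: %#x\n", fake_rte_low[3]);
                return 0;
        }

The design point is that the device register never has to be read back; as long as every writer updates the cached word under the same lock, the copy and the hardware stay in step.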
arch/ia64/kernel/irq.c
@@ -455,7 +455,6 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
 	unsigned int status;
 	int cpu;
 
-	irq_enter();
 	cpu = smp_processor_id();	/* for CONFIG_PREEMPT, this must come after irq_enter()! */
 	kstat_cpu(cpu).irqs[irq]++;
@@ -525,7 +524,6 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
 		desc->handler->end(irq);
 		spin_unlock(&desc->lock);
 	}
-	irq_exit();
 	return 1;
 }
...
arch/ia64/kernel/irq_ia64.c
@@ -120,6 +120,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	 * 16 (without this, it would be ~240, which could easily lead
 	 * to kernel stack overflows).
 	 */
+	irq_enter();
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
@@ -143,8 +144,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	 * handler needs to be able to wait for further keyboard interrupts, which can't
 	 * come through until ia64_eoi() has been done.
 	 */
-	if (local_softirq_pending())
-		do_softirq();
+	irq_exit();
 }
 
 #ifdef CONFIG_SMP
...
arch/ia64/kernel/ivt.S
@@ -118,10 +118,11 @@ ENTRY(vhpt_miss)
 #ifdef CONFIG_HUGETLB_PAGE
 	extr.u r26=r25,2,6
 	;;
-	cmp.eq p8,p0=HPAGE_SHIFT,r26
+	cmp.ne p8,p0=r18,r26
+	sub r27=r26,r18
 	;;
 (p8)	dep r25=r18,r25,2,6
-(p8)	shr r22=r22,HPAGE_SHIFT-PAGE_SHIFT
+(p8)	shr r22=r22,r27
 #endif
 	;;
 	cmp.eq p6,p7=5,r17	// is IFA pointing into to region 5?
...
arch/ia64/kernel/perfmon.c
This diff is collapsed in the web view (+180 -177) and is not shown here.
arch/ia64/kernel/perfmon_default_smpl.c
@@ -178,6 +178,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
 	ent->tstamp = stamp;
 	ent->cpu = smp_processor_id();
 	ent->set = arg->active_set;
+	ent->tgid = current->tgid;
 
 	/*
 	 * selectively store PMDs in increasing index number
...
arch/ia64/kernel/process.c
@@ -259,10 +259,12 @@ ia64_load_extra (struct task_struct *task)
 *
 * We get here through the following call chain:
 *
-*	<clone syscall>
-*	sys_clone
-*	do_fork
-*	copy_thread
+*	from user-level:	from kernel:
+*
+*	<clone syscall>		<some kernel call frames>
+*	sys_clone		:
+*	do_fork			do_fork
+*	copy_thread		copy_thread
 *
 * This means that the stack layout is as follows:
 *
@@ -276,9 +278,6 @@ ia64_load_extra (struct task_struct *task)
 *	|                     | <-- sp (lowest addr)
 *	+---------------------+
 *
-* Note: if we get called through kernel_thread() then the memory above "(highest addr)"
-* is valid kernel stack memory that needs to be copied as well.
-*
 * Observe that we copy the unat values that are in pt_regs and switch_stack.  Spilling an
 * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
 * with N=(X & 0x1ff)/8.  Thus, copying the unat value preserves the NaT bits ONLY if the
@@ -291,9 +290,9 @@ copy_thread (int nr, unsigned long clone_flags,
 	     unsigned long user_stack_base, unsigned long user_stack_size,
 	     struct task_struct *p, struct pt_regs *regs)
 {
-	unsigned long rbs, child_rbs, rbs_size, stack_offset, stack_top, stack_used;
-	struct switch_stack *child_stack, *stack;
 	extern char ia64_ret_from_clone, ia32_ret_from_clone;
+	struct switch_stack *child_stack, *stack;
+	unsigned long rbs, child_rbs, rbs_size;
 	struct pt_regs *child_ptregs;
 	int retval = 0;
@@ -306,16 +305,13 @@ copy_thread (int nr, unsigned long clone_flags,
 		return 0;
 #endif
 
-	stack_top = (unsigned long) current + IA64_STK_OFFSET;
 	stack = ((struct switch_stack *) regs) - 1;
-	stack_used = stack_top - (unsigned long) stack;
-	stack_offset = IA64_STK_OFFSET - stack_used;
-
-	child_stack = (struct switch_stack *) ((unsigned long) p + stack_offset);
-	child_ptregs = (struct pt_regs *) (child_stack + 1);
+	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
+	child_stack = (struct switch_stack *) child_ptregs - 1;
 
 	/* copy parent's switch_stack & pt_regs to child: */
-	memcpy(child_stack, stack, stack_used);
+	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
 
 	rbs = (unsigned long) current + IA64_RBS_OFFSET;
 	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
@@ -324,7 +320,7 @@ copy_thread (int nr, unsigned long clone_flags,
 	/* copy the parent's register backing store to the child: */
 	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
 
-	if (user_mode(child_ptregs)) {
+	if (likely(user_mode(child_ptregs))) {
 		if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
 			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
 		if (user_stack_base) {
@@ -341,14 +337,14 @@ copy_thread (int nr, unsigned long clone_flags,
 			 * been taken care of by the caller of sys_clone()
 			 * already.
 			 */
-			child_ptregs->r12 = (unsigned long) (child_ptregs + 1); /* kernel sp */
+			child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
 			child_ptregs->r13 = (unsigned long) p;	/* set `current' pointer */
 		}
+		child_stack->ar_bspstore = child_rbs + rbs_size;
 		if (IS_IA32_PROCESS(regs))
 			child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
 		else
 			child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
-		child_stack->ar_bspstore = child_rbs + rbs_size;
 	}
 
 	/* copy parts of thread_struct: */
 	p->thread.ksp = (unsigned long) child_stack - 16;
@@ -578,12 +574,32 @@ ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter)
 pid_t
 kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
 {
-	struct task_struct *parent = current;
-	int result;
-	pid_t tid;
-
-	tid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
-	if (parent != current) {
+	extern void ia64_invoke_kernel_thread_helper (void);
+	unsigned long *helper_fptr = (unsigned long *) &ia64_invoke_kernel_thread_helper;
+	struct {
+		struct switch_stack sw;
+		struct pt_regs pt;
+	} regs;
+
+	memset(&regs, 0, sizeof(regs));
+	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
+	regs.pt.r1 = helper_fptr[1];		/* set GP */
+	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
+	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
+	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */
+	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
+	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
+	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
+	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
+
+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+/* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */
+int
+kernel_thread_helper (int (*fn)(void *), void *arg)
+{
 #ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
 		/* A kernel thread is always a 64-bit process. */
@@ -593,12 +609,8 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
 		ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
 	}
 #endif
-		result = (*fn)(arg);
-		_exit(result);
-	}
-	return tid;
+	return (*fn)(arg);
 }
-EXPORT_SYMBOL(kernel_thread);
 
 /*
  * Flush thread state.  This is called when a thread does an execve().
...
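Reading the process.c and head.S hunks together: kernel_thread() no longer calls clone() on its own behalf; it fills in a throwaway switch_stack/pt_regs pair, points cr_iip at ia64_invoke_kernel_thread_helper and stashes fn and arg in r9/r11, which the new assembly stub copies into out0/out1 before calling kernel_thread_helper() and finally sys_exit(). The two words read through helper_fptr come from the function descriptor that an ia64 function pointer actually refers to; a sketch of that layout for orientation (the struct name is mine, not the kernel's):

        /* Illustration only: what an ia64 (Itanium) function pointer points at.
         * helper_fptr[0] and helper_fptr[1] in kernel_thread() are these two words. */
        struct ia64_func_descriptor {
                unsigned long entry;   /* address of the first instruction; goes into cr_iip */
                unsigned long gp;      /* global pointer the callee expects; goes into r1 */
        };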
arch/ia64/mm/hugetlbpage.c
 /*
  * IA-64 Huge TLB Page Support for Kernel.
  *
- * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
+ * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
+ * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
+ *
+ * Sep, 2003: add numa support
+ * Feb, 2004: dynamic hugetlb page size via boot parameter
  */
 
 #include <linux/config.h>
@@ -18,11 +22,10 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-#define TASK_HPAGE_BASE	(REGION_HPAGE << REGION_SHIFT)
-
 static long	htlbpagemem;
 int		htlbpage_max;
 static long	htlbzone_pages;
+unsigned int	hpage_shift = HPAGE_SHIFT_DEFAULT;
 
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
@@ -407,7 +410,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 		return -EINVAL;
 	/* This code assumes that REGION_HPAGE != 0. */
 	if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
-		addr = TASK_HPAGE_BASE;
+		addr = HPAGE_REGION_BASE;
 	else
 		addr = ALIGN(addr, HPAGE_SIZE);
 	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
@@ -520,6 +523,35 @@ static int __init hugetlb_setup(char *s)
 }
 __setup("hugepages=", hugetlb_setup);
 
+static int __init hugetlb_setup_sz(char *str)
+{
+	u64 tr_pages;
+	unsigned long long size;
+
+	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
+		/*
+		 * shouldn't happen, but just in case.
+		 */
+		tr_pages = 0x15557000UL;
+
+	size = memparse(str, &str);
+	if (*str || (size & (size - 1)) || !(tr_pages & size) ||
+	    size <= PAGE_SIZE ||
+	    size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
+		printk(KERN_WARNING "Invalid huge page size specified\n");
+		return 1;
+	}
+
+	hpage_shift = __ffs(size);
+	/*
+	 * boot cpu already executed ia64_mmu_init, and has HPAGE_SHIFT_DEFAULT
+	 * override here with new page shift.
+	 */
+	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
+	return 1;
+}
+__setup("hugepagesz=", hugetlb_setup_sz);
+
 static int __init hugetlb_init(void)
 {
 	int i;
@@ -540,7 +572,7 @@ static int __init hugetlb_init(void)
 	printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
 	return 0;
 }
-module_init(hugetlb_init);
+__initcall(hugetlb_init);
 
 int hugetlb_report_meminfo(char *buf)
 {
...
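With hugetlb_setup_sz() registered above, the huge page size becomes a boot-time choice: hpage_shift starts at HPAGE_SHIFT_DEFAULT and is overridden when the command line carries hugepagesz=, provided the value is a power of two that PAL_VM_PAGE_SIZE reports as implemented and that lies between PAGE_SIZE and the MAX_ORDER limit. A hypothetical command-line fragment (the sizes here are examples only, not taken from this commit, and depend on what the processor supports):

        hugepagesz=64M hugepages=128

The pre-existing hugepages= option (hugetlb_setup(), visible as context above) still controls how many pages of that size are reserved; only the size selection is new.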
arch/ia64/mm/init.c
@@ -342,6 +342,10 @@ ia64_mmu_init (void *my_cpu_data)
 	ia64_tlb_init();
 
+#ifdef	CONFIG_HUGETLB_PAGE
+	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
+#endif
+
 #ifdef	CONFIG_IA64_MCA
 	cpu = smp_processor_id();
...
include/asm-ia64/iosapic.h
@@ -45,9 +45,9 @@
 /*
  * Mask bit
  */
 #define	IOSAPIC_MASK_SHIFT		16
-#define	IOSAPIC_UNMASK			0
-#define	IOSAPIC_MSAK			1
+#define	IOSAPIC_MASK			(1<<IOSAPIC_MASK_SHIFT)
 
 #ifndef __ASSEMBLY__
...
include/asm-ia64/mmu_context.h
@@ -140,8 +140,9 @@ reload_context (mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
-	unsigned long rr0, rr1, rr2, rr3, rr4;
+	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
 
+	old_rr4 = ia64_get_rr(0x8000000000000000);
 	rid = context << 3;	/* make space for encoding the region number */
 	rid_incr = 1 << 8;
@@ -152,7 +153,7 @@ reload_context (mm_context_t context)
 	rr3 = rr0 + 3*rid_incr;
 	rr4 = rr0 + 4*rid_incr;
 #ifdef  CONFIG_HUGETLB_PAGE
-	rr4 = (rr4 & (~(0xfcUL))) | (HPAGE_SHIFT << 2);
+	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
 #endif
 
 	ia64_set_rr(0x0000000000000000, rr0);
...
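The effect of the mmu_context.h hunk: instead of stamping the compile-time HPAGE_SHIFT into region register 4 on every context reload, reload_context() now reads the currently installed rr4 (region 4 starts at virtual address 0x8000000000000000) and carries its page-size field over, so a size chosen at boot via hugepagesz= survives context switches. In the IA-64 region register layout the preferred-page-size field sits in bits 7:2, which is what the 0xfc mask selects. A small stand-alone check of that bit arithmetic (the register values are made up for illustration):

        #include <stdio.h>

        int main(void)
        {
                /* pretend values: a freshly computed rr4 (new RID, default ps)
                 * and the rr4 installed earlier by ia64_mmu_init (boot-chosen ps) */
                unsigned long rr4     = (0x1234UL << 8) | (28UL << 2);  /* ps = 28 -> 256MB */
                unsigned long old_rr4 = (0x0042UL << 8) | (26UL << 2);  /* ps = 26 -> 64MB  */

                /* the expression from the diff: keep everything of rr4 except
                 * bits 7:2, and take those bits (the page-size field) from old_rr4 */
                unsigned long merged = (rr4 & ~0xfcUL) | (old_rr4 & 0xfc);

                printf("rid = %#lx\n", merged >> 8);          /* still 0x1234 */
                printf("ps  = %lu\n", (merged >> 2) & 0x3f);  /* now 26, taken from old_rr4 */
                return 0;
        }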
include/asm-ia64/page.h
@@ -37,31 +37,14 @@
 #define RGN_MAP_LIMIT	((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)	/* per region addr limit */
 
 #ifdef CONFIG_HUGETLB_PAGE
-# define REGION_HPAGE	(4UL)	/* note: this is hardcoded in reload_context()!*/
-# if defined(CONFIG_HUGETLB_PAGE_SIZE_4GB)
-#  define HPAGE_SHIFT	32
-# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1GB)
-#  define HPAGE_SHIFT	30
-# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256MB)
-#  define HPAGE_SHIFT	28
-# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
-#  define HPAGE_SHIFT	26
-# elif defined(CONFIG_HUGETLB_PAGE_SIZE_16MB)
-#  define HPAGE_SHIFT	24
-# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#  define HPAGE_SHIFT	22
-# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
-#  define HPAGE_SHIFT	20
-# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256KB)
-#  define HPAGE_SHIFT	18
-# else
-#  error Unsupported IA-64 HugeTLB Page Size!
-# endif
-
+# define REGION_HPAGE		(4UL)	/* note: this is hardcoded in mmu_context.h:reload_context()!*/
 # define REGION_SHIFT		61
+# define HPAGE_REGION_BASE	(REGION_HPAGE << REGION_SHIFT)
+# define HPAGE_SHIFT		hpage_shift
+# define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
 # define HPAGE_SIZE		(__IA64_UL_CONST(1) << HPAGE_SHIFT)
 # define HPAGE_MASK		(~(HPAGE_SIZE - 1))
 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 # define ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #endif /* CONFIG_HUGETLB_PAGE */
@@ -140,6 +123,7 @@ typedef union ia64_va {
 # define is_hugepage_only_range(addr, len)		\
	 (REGION_NUMBER(addr) == REGION_HPAGE &&	\
	  REGION_NUMBER((addr)+(len)) == REGION_HPAGE)
+extern unsigned int hpage_shift;
 #endif
 
 static __inline__ int
...
...
include/asm-ia64/perfmon_default_smpl.h
View file @
9658e7b7
...
@@ -59,7 +59,7 @@ typedef struct {
...
@@ -59,7 +59,7 @@ typedef struct {
* last_reset_value member indicates the initial value of the overflowed PMD.
* last_reset_value member indicates the initial value of the overflowed PMD.
*/
*/
typedef
struct
{
typedef
struct
{
int
pid
;
/*
active process at PMU interrupt point
*/
int
pid
;
/*
thread id (for NPTL, this is gettid())
*/
unsigned
char
reserved1
[
3
];
/* reserved for future use */
unsigned
char
reserved1
[
3
];
/* reserved for future use */
unsigned
char
ovfl_pmd
;
/* index of overflowed PMD */
unsigned
char
ovfl_pmd
;
/* index of overflowed PMD */
...
@@ -69,7 +69,7 @@ typedef struct {
...
@@ -69,7 +69,7 @@ typedef struct {
unsigned
short
cpu
;
/* cpu on which the overfow occured */
unsigned
short
cpu
;
/* cpu on which the overfow occured */
unsigned
short
set
;
/* event set active when overflow ocurred */
unsigned
short
set
;
/* event set active when overflow ocurred */
unsigned
int
reserved2
;
/* for future use
*/
int
tgid
;
/* thread group id (for NPTL, this is getpid())
*/
}
pfm_default_smpl_entry_t
;
}
pfm_default_smpl_entry_t
;
#define PFM_DEFAULT_MAX_PMDS 64
/* how many pmds supported by data structures (sizeof(unsigned long) */
#define PFM_DEFAULT_MAX_PMDS 64
/* how many pmds supported by data structures (sizeof(unsigned long) */
...
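The comments on the new pid/tgid fields refer to the NPTL threading model: every thread has its own kernel thread id, returned by gettid(), while getpid() returns the shared thread-group id, so a sampling entry now records both. A small user-space illustration of the distinction (ordinary Linux API usage, not code from this commit; glibc of that era had no gettid() wrapper, hence the raw syscall):

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <pthread.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        static void *worker(void *unused)
        {
                (void) unused;
                /* in a second thread, the tid differs from the pid (= tgid) */
                printf("worker: pid=%ld tid=%ld\n",
                       (long) getpid(), (long) syscall(SYS_gettid));
                return NULL;
        }

        int main(void)
        {
                pthread_t t;

                /* in the initial thread, pid and tid are the same number */
                printf("main:   pid=%ld tid=%ld\n",
                       (long) getpid(), (long) syscall(SYS_gettid));

                pthread_create(&t, NULL, worker, NULL);
                pthread_join(t, NULL);
                return 0;
        }

Build with -pthread; the worker line prints a tid that differs from the pid, which is exactly the pair the default sampling format now stores per overflow entry.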
...
include/asm-ia64/scatterlist.h
View file @
9658e7b7
...
@@ -2,7 +2,7 @@
...
@@ -2,7 +2,7 @@
#define _ASM_IA64_SCATTERLIST_H
#define _ASM_IA64_SCATTERLIST_H
/*
/*
* Modified 1998-1999, 2001-2002
* Modified 1998-1999, 2001-2002
, 2004
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
*/
...
@@ -15,6 +15,14 @@ struct scatterlist {
...
@@ -15,6 +15,14 @@ struct scatterlist {
unsigned
int
dma_length
;
unsigned
int
dma_length
;
};
};
#define ISA_DMA_THRESHOLD (~0UL)
/*
* It used to be that ISA_DMA_THRESHOLD had something to do with the
* DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
* from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
* tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical
* address of a page is that is allocated with GFP_DMA. On IA-64,
* that's 4GB - 1.
*/
#define ISA_DMA_THRESHOLD 0xffffffff
#endif
/* _ASM_IA64_SCATTERLIST_H */
#endif
/* _ASM_IA64_SCATTERLIST_H */