nexedi / linux
Commit a52ccf00, authored Jul 16, 2003 by Linus Torvalds
Merge http://lia64.bkbits.net/to-linus-2.5
into home.osdl.org:/home/torvalds/v2.5/linux
Parents: 18a65429 900e7bd2
Showing 23 changed files with 256 additions and 61 deletions (+256, -61)
arch/ia64/Makefile                 +1  -2
arch/ia64/kernel/acpi.c            +1  -1
arch/ia64/kernel/entry.S           +32 -3
arch/ia64/kernel/fsys.S            +2  -2
arch/ia64/kernel/ia64_ksyms.c      +2  -1
arch/ia64/kernel/init_task.c       +2  -2
arch/ia64/kernel/iosapic.c         +1  -0
arch/ia64/kernel/perfmon.c         +1  -1
arch/ia64/kernel/ptrace.c          +1  -1
arch/ia64/kernel/setup.c           +8  -6
arch/ia64/kernel/smp.c             +2  -2
arch/ia64/kernel/time.c            +1  -2
arch/ia64/pci/pci.c                +6  -6
arch/ia64/scripts/check-model.c    +1  -0   (new file)
arch/ia64/scripts/toolchain-flags  +9  -1
include/asm-ia64/atomic.h          +75 -2
include/asm-ia64/elf.h             +5  -5
include/asm-ia64/local.h           +50 -0   (new file)
include/asm-ia64/mmu_context.h     +2  -2
include/asm-ia64/percpu.h          +47 -17
include/asm-ia64/processor.h       +4  -2
include/asm-ia64/system.h          +2  -2
include/asm-ia64/tlb.h             +1  -1
arch/ia64/Makefile

@@ -66,8 +66,7 @@ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
 drivers-$(CONFIG_PCI)          += arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)  += arch/ia64/hp/sim/
 drivers-$(CONFIG_IA64_HP_ZX1)  += arch/ia64/hp/common/ arch/ia64/hp/zx1/
-drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ \
-                                  arch/ia64/sn/
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/

 boot := arch/ia64/boot
arch/ia64/kernel/acpi.c

@@ -720,7 +720,7 @@ acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
 {
        int vector = 0;

-       if (acpi_madt->flags.pcat_compat && (gsi < 16))
+       if (has_8259 && (gsi < 16))
                return isa_irq_to_vector(gsi);

        if (!iosapic_register_intr)
arch/ia64/kernel/entry.S

@@ -61,7 +61,17 @@ ENTRY(ia64_execve)
        mov out2=in2                    // envp
        add out3=16,sp                  // regs
        br.call.sptk.many rp=sys_execve
-.ret0: cmp4.ge p6,p7=r8,r0
+.ret0:
+#ifdef CONFIG_IA32_SUPPORT
+       /*
+        * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
+        * from pt_regs.
+        */
+       adds r16=PT(CR_IPSR)+16,sp
+       ;;
+       ld8 r16=[r16]
+#endif
+       cmp4.ge p6,p7=r8,r0
        mov ar.pfs=loc1                 // restore ar.pfs
        sxt4 r8=r8                      // return 64-bit result
        ;;
@@ -89,6 +99,12 @@ ENTRY(ia64_execve)
        ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
        ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
        ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
+#ifdef CONFIG_IA32_SUPPORT
+       tbit.nz p6,p0=r16,IA64_PSR_IS_BIT
+       movl loc0=ia64_ret_from_ia32_execve
+       ;;
+(p6)   mov rp=loc0
+#endif
        br.ret.sptk.many rp
 END(ia64_execve)
@@ -688,7 +704,7 @@ GLOBAL_ENTRY(ia64_leave_syscall)
        mov b7=r0               // clear b7
        ;;
 (pUStk) st1 [r14]=r3
-       movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
+       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
        ;;
        mov r16=ar.bsp          // get existing backing store pointer
        srlz.i                  // ensure interruption collection is off
@@ -701,6 +717,19 @@ GLOBAL_ENTRY(ia64_leave_syscall)
        br.cond.sptk.many rbs_switch
 END(ia64_leave_syscall)

+#ifdef CONFIG_IA32_SUPPORT
+GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+       PT_REGS_UNWIND_INFO(0)
+       adds r2=PT(R8)+16,sp            // r2 = &pt_regs.r8
+       adds r3=PT(R10)+16,sp           // r3 = &pt_regs.r10
+       ;;
+       .mem.offset 0,0
+       st8.spill [r2]=r8       // store return value in slot for r8 and set unat bit
+       .mem.offset 8,0
+       st8.spill [r3]=r0       // clear error indication in slot for r10 and set unat bit
+END(ia64_ret_from_ia32_execve_syscall)  // fall through
+#endif /* CONFIG_IA32_SUPPORT */
+
 GLOBAL_ENTRY(ia64_leave_kernel)
        PT_REGS_UNWIND_INFO(0)
        /*
@@ -841,7 +870,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
        shr.u r18=r19,16        // get byte size of existing "dirty" partition
        ;;
        mov r16=ar.bsp          // get existing backing store pointer
-       movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
+       addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
        ;;
        ld4 r17=[r17]           // r17 = cpu_data->phys_stacked_size_p8
 (pKStk) br.cond.dpnt skip_rbs_switch
arch/ia64/kernel/fsys.S

@@ -165,7 +165,7 @@ ENTRY(fsys_gettimeofday)
        .altrp b6
        .body
        add r9=TI_FLAGS+IA64_TASK_SIZE,r16
-       movl r3=THIS_CPU(cpu_info)
+       addl r3=THIS_CPU(cpu_info),r0

        mov.m r31=ar.itc                // put time stamp into r31 (ITC) == now (35 cyc)
 #ifdef CONFIG_SMP
@@ -177,7 +177,7 @@ ENTRY(fsys_gettimeofday)
        movl r19=xtime                  // xtime is a timespec struct
        ld8 r10=[r10]                   // r10 <- __per_cpu_offset[0]
-       movl r21=THIS_CPU(cpu_info)
+       addl r21=THIS_CPU(cpu_info),r0
        ;;
        add r10=r21,r10                 // r10 <- &cpu_data(time_keeper_id)
        tbit.nz p8,p0=r2,IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT
arch/ia64/kernel/ia64_ksyms.c

@@ -64,9 +64,10 @@ EXPORT_SYMBOL(ia64_pfn_valid);
 #endif

 #include <asm/processor.h>
-EXPORT_SYMBOL(cpu_info__per_cpu);
+EXPORT_SYMBOL(per_cpu__cpu_info);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__per_cpu_offset);
+EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif

 EXPORT_SYMBOL(kernel_thread);
arch/ia64/kernel/init_task.c

@@ -2,7 +2,7 @@
  * This is where we statically allocate and initialize the initial
  * task.
  *
- * Copyright (C) 1999, 2002 Hewlett-Packard Co
+ * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
@@ -34,7 +34,7 @@ static union {
                struct thread_info thread_info;
        } s;
        unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem __attribute__((section(".data.init_task"))) = {{
+} init_task_mem asm ("init_task_mem") __attribute__((section(".data.init_task"))) = {{
        .task =         INIT_TASK(init_task_mem.s.task),
        .thread_info =  INIT_THREAD_INFO(init_task_mem.s.task)
 }};
arch/ia64/kernel/iosapic.c

@@ -717,6 +717,7 @@ iosapic_parse_prt (void)
                        register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW,
                                      IOSAPIC_LEVEL);
                }
+               entry->irq = vector;
                snprintf(pci_id, sizeof(pci_id), "%02x:%02x:%02x[%c]",
                         entry->id.segment, entry->id.bus, entry->id.device, 'A' + entry->pin);
arch/ia64/kernel/perfmon.c

@@ -566,7 +566,7 @@ static struct vm_operations_struct pfm_vm_ops={
 #define pfm_wait_task_inactive(t)      wait_task_inactive(t)
-#define pfm_get_cpu_var(v)             __get_cpu_var(v)
+#define pfm_get_cpu_var(v)             __ia64_per_cpu_var(v)
 #define pfm_get_cpu_data(a,b)          per_cpu(a, b)
 typedef irqreturn_t    pfm_irq_handler_t;
 #define PFM_IRQ_HANDLER_RET(v) do {    \
arch/ia64/kernel/ptrace.c

@@ -42,7 +42,7 @@
        (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
 #define IPSR_READ_MASK IPSR_WRITE_MASK

-#define PTRACE_DEBUG   1
+#define PTRACE_DEBUG   0

 #if PTRACE_DEBUG
 # define dprintk(format...)    printk(format)
arch/ia64/kernel/setup.c

@@ -56,6 +56,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
 #endif

 DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
 DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
@@ -709,6 +710,8 @@ cpu_init (void)
                        memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
                        __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
                        cpu_data += PERCPU_PAGE_SIZE;
+
+                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
                }
        }
        cpu_data = __per_cpu_start + __per_cpu_offset[smp_processor_id()];
@@ -716,19 +719,18 @@ cpu_init (void)
        cpu_data = __phys_per_cpu_start;
 #endif /* !CONFIG_SMP */

-       cpu_info = cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
-#ifdef CONFIG_NUMA
-       cpu_info->node_data = get_node_data_ptr();
-#endif
-
        get_max_cacheline_size();

        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
         * depends on the data returned by identify_cpu().  We break the dependency by
-        * accessing cpu_data() the old way, through identity mapped space.
+        * accessing cpu_data() through the canonical per-CPU address.
         */
+       cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+#ifdef CONFIG_NUMA
+       cpu_info->node_data = get_node_data_ptr();
+#endif
        identify_cpu(cpu_info);

 #ifdef CONFIG_MCKINLEY
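The second hunk is the boot-time half of the __get_cpu_var() rework in include/asm-ia64/percpu.h further down: each CPU caches its own entry of __per_cpu_offset[] in the per-CPU variable local_per_cpu_offset, so a CPU-local access no longer needs smp_processor_id() to index the offset table. A rough standalone sketch of that idea; cpu_offsets, my_offset, and counter are invented names, not kernel code:

#include <stdio.h>

#define NCPUS 4
static long cpu_offsets[NCPUS];  /* like __per_cpu_offset[]: one slot per CPU */
static long my_offset;           /* like local_per_cpu_offset: cached at boot */
static long counter;             /* like a DEFINE_PER_CPU variable (template copy) */

/* per_cpu(counter, cpu): index the global offset table */
static long *remote_ref(int cpu)
{
        return (long *)((char *)&counter + cpu_offsets[cpu]);
}

/* __get_cpu_var(counter): reuse the offset this CPU cached for itself */
static long *local_ref(void)
{
        return (long *)((char *)&counter + my_offset);
}

int main(void)
{
        my_offset = cpu_offsets[0];                     /* cpu_init() analogue on CPU 0 */
        printf("%d\n", local_ref() == remote_ref(0));   /* prints 1 */
        return 0;
}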
arch/ia64/kernel/smp.c

@@ -72,7 +72,7 @@ static volatile struct call_data_struct *call_data;
 #define IPI_CPU_STOP   1

 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
-static DEFINE_PER_CPU(__u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;

 static void
 stop_this_cpu (void)
@@ -91,7 +91,7 @@ irqreturn_t
 handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
 {
        int this_cpu = get_cpu();
-       unsigned long *pending_ipis = &__get_cpu_var(ipi_operation);
+       unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
        unsigned long ops;

        /* Count this now; we may make a call that never returns.  */
arch/ia64/kernel/time.c

@@ -83,12 +83,11 @@ unsigned long
 itc_get_offset (void)
 {
        unsigned long elapsed_cycles, lost = jiffies - wall_jiffies;
-       unsigned long now, last_tick;
+       unsigned long now = ia64_get_itc(), last_tick;

        last_tick = (cpu_data(TIME_KEEPER_ID)->itm_next
                     - (lost + 1)*cpu_data(TIME_KEEPER_ID)->itm_delta);

-       now = ia64_get_itc();
        if (unlikely((long) (now - last_tick) < 0)) {
                printk(KERN_ERR "CPU %d: now < last_tick (now=0x%lx,last_tick=0x%lx)!\n",
                       smp_processor_id(), now, last_tick);
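For reference, the last_tick arithmetic this hunk rearranges: itm_next holds the ITC value at which the next timer interrupt will fire, so with lost ticks still unaccounted the most recent tick fired (lost + 1) periods earlier. A worked example with invented numbers, not kernel code:

#include <stdio.h>

int main(void)
{
        unsigned long itm_delta = 1000;    /* ITC cycles per timer tick (invented) */
        unsigned long itm_next  = 50000;   /* ITC value of the next programmed tick */
        unsigned long lost      = 2;       /* jiffies - wall_jiffies */
        unsigned long last_tick = itm_next - (lost + 1) * itm_delta;
        unsigned long now       = 47350;   /* pretend this came from ar.itc */

        printf("last_tick=%lu offset=%lu\n", last_tick, now - last_tick);
        return 0;                          /* prints last_tick=47000 offset=350 */
}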
arch/ia64/pci/pci.c

@@ -124,7 +124,7 @@ subsys_initcall(pci_acpi_init);

 /* Called by ACPI when it finds a new root bus.  */

-static struct pci_controller *
+static struct pci_controller * __devinit
 alloc_pci_controller (int seg)
 {
        struct pci_controller *controller;
@@ -138,7 +138,7 @@ alloc_pci_controller (int seg)
        return controller;
 }

-static int
+static int __devinit
 alloc_resource (char *name, struct resource *root, unsigned long start, unsigned long end, unsigned long flags)
 {
        struct resource *res;
@@ -159,7 +159,7 @@ alloc_resource (char *name, struct resource *root, unsigned long start, unsigned
        return 0;
 }

-static u64
+static u64 __devinit
 add_io_space (struct acpi_resource_address64 *addr)
 {
        u64 offset;
@@ -190,7 +190,7 @@ add_io_space (struct acpi_resource_address64 *addr)
        return IO_SPACE_BASE(i);
 }

-static acpi_status
+static acpi_status __devinit
 count_window (struct acpi_resource *resource, void *data)
 {
        unsigned int *windows = (unsigned int *) data;
@@ -211,7 +211,7 @@ struct pci_root_info {
        char *name;
 };

-static acpi_status
+static acpi_status __devinit
 add_window (struct acpi_resource *res, void *data)
 {
        struct pci_root_info *info = (struct pci_root_info *) data;
@@ -252,7 +252,7 @@ add_window (struct acpi_resource *res, void *data)
        return AE_OK;
 }

-struct pci_bus *
+struct pci_bus * __devinit
 pci_acpi_scan_root (struct acpi_device *device, int domain, int bus)
 {
        struct pci_root_info info;
arch/ia64/scripts/check-model.c (new file, mode 100644)

+int __attribute__ ((__model__ (__small__))) x;
arch/ia64/scripts/toolchain-flags

@@ -2,6 +2,7 @@
 #
 # Check whether linker can handle cross-segment @segrel():
 #
+CPPFLAGS=""
 CC=$1
 OBJDUMP=$2
 dir=$(dirname $0)
@@ -11,10 +12,17 @@ $CC -nostdlib -static -Wl,-T$dir/check-segrel.lds $dir/check-segrel.S -o $out
 res=$($OBJDUMP --full --section .rodata $out | fgrep 000 | cut -f3 -d' ')
 rm -f $out
 if [ $res != 00000a00 ]; then
-    echo " -DHAVE_BUGGY_SEGREL"
+    CPPFLAGS="$CPPFLAGS -DHAVE_BUGGY_SEGREL"
     cat >&2 <<EOF
 warning: your linker cannot handle cross-segment segment-relative relocations.
          please upgrade to a newer version (it is safe to use this linker, but
          the kernel will be bigger than strictly necessary).
 EOF
 fi
+
+if ! $CC -c $dir/check-model.c -o $out | grep -q 'attribute directive ignored'
+then
+    CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE"
+fi
+rm -f $out
+echo $CPPFLAGS
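check-model.c is a one-line compile probe: if the compiler accepts __attribute__((__model__(__small__))) without emitting an "attribute directive ignored" warning, the script adds -DHAVE_MODEL_SMALL_ATTRIBUTE to CPPFLAGS. The consumer is include/asm-ia64/percpu.h below, which turns the flag into an address-model annotation, roughly as follows (probe_example is an invented name):

/* From the percpu.h change below: with the probe successful, per-CPU
 * variables are placed in the small data model so they can be reached
 * with short addressing sequences. */
#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define __SMALL_ADDR_AREA      __attribute__((__model__ (__small__)))
#else
# define __SMALL_ADDR_AREA      /* attribute unsupported: no annotation */
#endif

__SMALL_ADDR_AREA int probe_example;    /* same shape as check-model.c's "x" */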
include/asm-ia64/atomic.h

@@ -9,7 +9,7 @@
  * "int" types were carefully placed so as to ensure proper operation
  * of the macros.
  *
- * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
+ * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */

 #include <linux/types.h>
@@ -21,11 +21,16 @@
  * memory accesses are ordered.
  */
 typedef struct { volatile __s32 counter; } atomic_t;
+typedef struct { volatile __s64 counter; } atomic64_t;

 #define ATOMIC_INIT(i)         ((atomic_t) { (i) })
+#define ATOMIC64_INIT(i)       ((atomic64_t) { (i) })

 #define atomic_read(v)         ((v)->counter)
+#define atomic64_read(v)       ((v)->counter)
+
 #define atomic_set(v,i)        (((v)->counter) = (i))
+#define atomic64_set(v,i)      (((v)->counter) = (i))

 static __inline__ int
 ia64_atomic_add (int i, atomic_t *v)
@@ -37,7 +42,21 @@ ia64_atomic_add (int i, atomic_t *v)
                CMPXCHG_BUGCHECK(v);
                old = atomic_read(v);
                new = old + i;
-       } while (ia64_cmpxchg("acq", v, old, old + i, sizeof(atomic_t)) != old);
+       } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+       return new;
+}
+
+static __inline__ int
+ia64_atomic64_add (int i, atomic64_t *v)
+{
+       __s64 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old + i;
+       } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
        return new;
 }
@@ -55,6 +74,20 @@ ia64_atomic_sub (int i, atomic_t *v)
        return new;
 }

+static __inline__ int
+ia64_atomic64_sub (int i, atomic64_t *v)
+{
+       __s64 old, new;
+       CMPXCHG_BUGCHECK_DECL
+
+       do {
+               CMPXCHG_BUGCHECK(v);
+               old = atomic_read(v);
+               new = old - i;
+       } while (ia64_cmpxchg("acq", v, old, new, sizeof(atomic_t)) != old);
+       return new;
+}
+
 #define atomic_add_return(i,v)                                         \
 ({                                                                     \
        int __ia64_aar_i = (i);                                         \
@@ -67,6 +100,18 @@ ia64_atomic_sub (int i, atomic_t *v)
         : ia64_atomic_add(__ia64_aar_i, v);                            \
 })

+#define atomic64_add_return(i,v)                                       \
+({                                                                     \
+       long __ia64_aar_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic64_add(__ia64_aar_i, v);                   \
+})
+
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
@@ -77,6 +122,12 @@ atomic_add_negative (int i, atomic_t *v)
        return atomic_add_return(i, v) < 0;
 }

+static __inline__ int
+atomic64_add_negative (int i, atomic64_t *v)
+{
+       return atomic64_add_return(i, v) < 0;
+}
+
 #define atomic_sub_return(i,v)                                         \
 ({                                                                     \
        int __ia64_asr_i = (i);                                         \
@@ -89,18 +140,40 @@ atomic_add_negative (int i, atomic_t *v)
         : ia64_atomic_sub(__ia64_asr_i, v);                            \
 })

+#define atomic64_sub_return(i,v)                                       \
+({                                                                     \
+       long __ia64_asr_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)           \
+            || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)           \
+            || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)           \
+            || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))         \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic64_sub(__ia64_asr_i, v);                   \
+})
+
 #define atomic_dec_return(v)           atomic_sub_return(1, (v))
 #define atomic_inc_return(v)           atomic_add_return(1, (v))
+#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v)         atomic64_add_return(1, (v))

 #define atomic_sub_and_test(i,v)       (atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)         (atomic_add_return(1, (v)) != 0)
+#define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v)       (atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v)       (atomic64_add_return(1, (v)) != 0)

 #define atomic_add(i,v)                        atomic_add_return((i), (v))
 #define atomic_sub(i,v)                        atomic_sub_return((i), (v))
 #define atomic_inc(v)                  atomic_add(1, (v))
 #define atomic_dec(v)                  atomic_sub(1, (v))

+#define atomic64_add(i,v)              atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)              atomic64_sub_return((i), (v))
+#define atomic64_inc(v)                        atomic64_add(1, (v))
+#define atomic64_dec(v)                        atomic64_sub(1, (v))
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()    barrier()
 #define smp_mb__after_atomic_dec()     barrier()
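A minimal usage sketch of the 64-bit API introduced above (bytes_seen and account() are invented names): the atomic64_* calls mirror the 32-bit ones one for one, and for the constant deltas enumerated in atomic64_add_return() the operation reduces to ia64_fetch_and_add() instead of the cmpxchg loop.

#include <asm/atomic.h>

static atomic64_t bytes_seen = ATOMIC64_INIT(0);

void account(long n)
{
        atomic64_add(n, &bytes_seen);       /* general n: cmpxchg loop */
        atomic64_inc(&bytes_seen);          /* constant 1: single fetchadd */
        if (atomic64_dec_and_test(&bytes_seen))
                atomic64_set(&bytes_seen, 0);   /* counter returned to zero */
}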
include/asm-ia64/elf.h

@@ -199,11 +199,11 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);

 #define GATE_EHDR      ((const struct elfhdr *) GATE_ADDR)

-#define ARCH_DLINFO \
-do { \
-       extern char __kernel_syscall_via_epc[]; \
-       NEW_AUX_ENT(AT_SYSINFO, __kernel_syscall_via_epc); \
-       NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \
+#define ARCH_DLINFO                                                    \
+do {                                                                   \
+       extern char __kernel_syscall_via_epc[];                         \
+       NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc); \
+       NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR);        \
 } while (0)

 /*
include/asm-ia64/local.h (new file, mode 100644)

+#ifndef _ASM_IA64_LOCAL_H
+#define _ASM_IA64_LOCAL_H
+
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/percpu.h>
+
+typedef struct {
+       atomic64_t val;
+} local_t;
+
+#define LOCAL_INIT(i)          ((local_t) { { (i) } })
+#define local_read(l)          atomic64_read(&(l)->val)
+#define local_set(l, i)        atomic64_set(&(l)->val, i)
+#define local_inc(l)           atomic64_inc(&(l)->val)
+#define local_dec(l)           atomic64_dec(&(l)->val)
+#define local_add(l)           atomic64_add(&(l)->val)
+#define local_sub(l)           atomic64_sub(&(l)->val)
+
+/* Non-atomic variants, i.e., preemption disabled and won't be touched in interrupt, etc.  */
+
+#define __local_inc(l)         (++(l)->val.counter)
+#define __local_dec(l)         (--(l)->val.counter)
+#define __local_add(i,l)       ((l)->val.counter += (i))
+#define __local_sub(i,l)       ((l)->val.counter -= (i))
+
+/*
+ * Use these for per-cpu local_t variables.  Note they take a variable (eg. mystruct.foo),
+ * not an address.
+ */
+#define cpu_local_read(v)      local_read(&__ia64_per_cpu_var(v))
+#define cpu_local_set(v, i)    local_set(&__ia64_per_cpu_var(v), (i))
+#define cpu_local_inc(v)       local_inc(&__ia64_per_cpu_var(v))
+#define cpu_local_dec(v)       local_dec(&__ia64_per_cpu_var(v))
+#define cpu_local_add(i, v)    local_add((i), &__ia64_per_cpu_var(v))
+#define cpu_local_sub(i, v)    local_sub((i), &__ia64_per_cpu_var(v))
+
+/*
+ * Non-atomic increments, i.e., preemption disabled and won't be touched in interrupt,
+ * etc.
+ */
+#define __cpu_local_inc(v)     __local_inc(&__ia64_per_cpu_var(v))
+#define __cpu_local_dec(v)     __local_dec(&__ia64_per_cpu_var(v))
+#define __cpu_local_add(i, v)  __local_add((i), &__ia64_per_cpu_var(v))
+#define __cpu_local_sub(i, v)  __local_sub((i), &__ia64_per_cpu_var(v))
+
+#endif /* _ASM_IA64_LOCAL_H */
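A hypothetical consumer of the new header: a per-CPU event counter that is cheap to bump from its own CPU (events, note_event(), and snapshot_events() are invented names, not part of the commit):

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, events);

void note_event(void)                   /* called with preemption disabled */
{
        __cpu_local_inc(events);        /* plain ++ on this CPU's counter */
}

long snapshot_events(int cpu)           /* cross-CPU read stays atomic */
{
        return local_read(&per_cpu(events, cpu));
}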
include/asm-ia64/mmu_context.h

@@ -86,9 +86,9 @@ delayed_tlb_flush (void)
 {
        extern void local_flush_tlb_all (void);

-       if (unlikely(__get_cpu_var(ia64_need_tlb_flush))) {
+       if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
                local_flush_tlb_all();
-               __get_cpu_var(ia64_need_tlb_flush) = 0;
+               __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
        }
 }
include/asm-ia64/percpu.h

 #ifndef _ASM_IA64_PERCPU_H
 #define _ASM_IA64_PERCPU_H

-#include <linux/config.h>
-#include <linux/compiler.h>
-
 /*
  * Copyright (C) 2002-2003 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */

 #define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE

 #ifdef __ASSEMBLY__
-#define THIS_CPU(var)  (var##__per_cpu)        /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var) (per_cpu__##var)        /* use this to mark accesses to per-CPU variables... */
 #else /* !__ASSEMBLY__ */

+#include <linux/config.h>
 #include <linux/threads.h>

-extern unsigned long __per_cpu_offset[NR_CPUS];
+#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
+# define __SMALL_ADDR_AREA     __attribute__((__model__ (__small__)))
+#else
+# define __SMALL_ADDR_AREA
+#endif

-#define DEFINE_PER_CPU(type, name) \
-    __attribute__((__section__(".data.percpu"))) __typeof__(type) name##__per_cpu
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) name##__per_cpu
+#define DECLARE_PER_CPU(type, name)                            \
+       extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

-#define __get_cpu_var(var)     (var##__per_cpu)
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name)                             \
+       __attribute__((__section__(".data.percpu")))            \
+       __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
+
+/*
+ * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
+ * external routine, to avoid include-hell.
+ */
 #ifdef CONFIG_SMP
-# define per_cpu(var, cpu)     (*RELOC_HIDE(&var##__per_cpu, __per_cpu_offset[cpu]))
+
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
+DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
+
+#define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
+#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
+
 extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
-#else
-# define per_cpu(var, cpu)     ((void)cpu, __get_cpu_var(var))
-#endif

-#define EXPORT_PER_CPU_SYMBOL(var)     EXPORT_SYMBOL(var##__per_cpu)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##__per_cpu)
+#else /* ! SMP */
+
+#define per_cpu(var, cpu)                      ((void)cpu, per_cpu__##var)
+#define __get_cpu_var(var)                     per_cpu__##var
+
+#endif /* SMP */
+
+#define EXPORT_PER_CPU_SYMBOL(var)             EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var)         EXPORT_SYMBOL_GPL(per_cpu__##var)

 /* ia64-specific part: */

 extern void setup_per_cpu_areas (void);

+/*
+ * Be extremely careful when taking the address of this variable!  Due to virtual
+ * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+ * more efficient.
+ */
+#define __ia64_per_cpu_var(var)        (per_cpu__##var)
+
 #endif /* !__ASSEMBLY__ */

 #endif /* _ASM_IA64_PERCPU_H */
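To see what changed, here is roughly what the reworked macros produce for a hypothetical DEFINE_PER_CPU(int, nmi_count); the expansions are taken from the diff above, the variable name is invented:

/* DEFINE_PER_CPU(int, nmi_count) now expands (modulo whitespace) to: */
__attribute__((__section__(".data.percpu")))
__SMALL_ADDR_AREA __typeof__(int) per_cpu__nmi_count;

/* per_cpu(nmi_count, 3):
 *      *RELOC_HIDE(&per_cpu__nmi_count, __per_cpu_offset[3])
 * __get_cpu_var(nmi_count):
 *      *RELOC_HIDE(&per_cpu__nmi_count, __ia64_per_cpu_var(local_per_cpu_offset))
 * __ia64_per_cpu_var(nmi_count):
 *      per_cpu__nmi_count, i.e. the variable through the virtually remapped
 *      per-CPU page; cheapest, but its address is not the canonical one. */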
include/asm-ia64/processor.h

@@ -191,10 +191,12 @@ struct cpuinfo_ia64 {
 DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

 /*
- * The "local" data pointer.  It points to the per-CPU data of the currently executing
+ * The "local" data variable.  It refers to the per-CPU data of the currently executing
  * CPU, much like "current" points to the per-task data of the currently executing task.
+ * Do not use the address of local_cpu_data, since it will be different from
+ * cpu_data(smp_processor_id())!
  */
-#define local_cpu_data         (&__get_cpu_var(cpu_info))
+#define local_cpu_data         (&__ia64_per_cpu_var(cpu_info))
 #define cpu_data(cpu)          (&per_cpu(cpu_info, cpu))

 extern void identify_cpu (struct cpuinfo_ia64 *);
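The strengthened comment matters in practice: local_cpu_data now goes through the virtually remapped per-CPU page, so it evaluates to the same fixed address on every CPU and compares unequal to the canonical address, even though both name the same storage. An illustrative fragment, not kernel code:

#include <linux/smp.h>
#include <asm/processor.h>

void show_cpuinfo_aliasing(void)
{
        struct cpuinfo_ia64 *a = local_cpu_data;                /* remapped, CPU-invariant address */
        struct cpuinfo_ia64 *b = cpu_data(smp_processor_id());  /* canonical address */

        /* On SMP, a != b, yet both name this CPU's cpuinfo storage;
         * that is why exporting or comparing &local_cpu_data is unsafe. */
        (void) a; (void) b;
}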
include/asm-ia64/system.h

@@ -20,9 +20,9 @@
 #include <asm/percpu.h>

 /* 0xa000000000000000 - 0xa000000000000000+PERCPU_PAGE_SIZE remain unmapped */
-#define PERCPU_ADDR            (0xa000000000000000 + PERCPU_PAGE_SIZE)
-#define GATE_ADDR              (0xa000000000000000 + 2*PERCPU_PAGE_SIZE)
+#define GATE_ADDR              (0xa000000000000000 + PERCPU_PAGE_SIZE)
 #define KERNEL_START           0xa000000100000000
+#define PERCPU_ADDR            (-PERCPU_PAGE_SIZE)

 #ifndef __ASSEMBLY__
include/asm-ia64/tlb.h

@@ -126,7 +126,7 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e
 static inline struct mmu_gather *
 tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
 {
-       struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());
+       struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        /*