Commit 16fddf54
Authored Mar 25, 2008 by Paul Mackerras

    Merge branch 'linux-2.6' into merge

Parents: 5492a7e4 cc7feea3

Showing 32 changed files with 171 additions and 245 deletions (+171 -245)
  arch/powerpc/mm/hash_utils_64.c              +8   -3
  arch/powerpc/sysdev/bestcomm/bestcomm.c      +6   -2
  arch/powerpc/sysdev/ipic.c                   +1   -1
  arch/sparc64/kernel/ds.c                     +1   -2
  arch/sparc64/kernel/head.S                   +6   -2
  arch/sparc64/kernel/process.c                +0   -3
  arch/sparc64/kernel/smp.c                    +9   -8
  arch/sparc64/kernel/sys_sparc32.c            +0   -3
  arch/sparc64/kernel/trampoline.S             +64  -124
  arch/sparc64/mm/init.c                       +14  -24
  arch/x86/mm/ioremap.c                        +3   -3
  drivers/connector/cn_queue.c                 +1   -1
  drivers/net/bnx2x.c                          +2   -34
  drivers/net/fec_mpc52xx_phy.c                +2   -1
  drivers/net/sungem.c                         +1   -1
  include/asm-sparc64/hvtramp.h                +1   -1
  include/asm-sparc64/spitfire.h               +2   -0
  include/asm-x86/io_32.h                      +3   -3
  include/asm-x86/io_64.h                      +3   -3
  include/net/sctp/sctp.h                      +1   -1
  kernel/time/timekeeping.c                    +4   -0
  lib/iomap.c                                  +1   -1
  net/9p/trans_fd.c                            +0   -2
  net/atm/clip.c                               +16  -3
  net/atm/lec.c                                +4   -0
  net/ipv4/fib_trie.c                          +5   -2
  net/ipv4/ip_fragment.c                       +1   -1
  net/ipv4/tcp.c                               +2   -2
  net/ipv6/ndisc.c                             +0   -2
  net/sched/sch_htb.c                          +7   -6
  net/socket.c                                 +3   -4
  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c      +0   -2
arch/powerpc/mm/hash_utils_64.c

@@ -351,9 +351,14 @@ static void __init htab_init_page_sizes(void)
         mmu_vmalloc_psize = MMU_PAGE_64K;
         if (mmu_linear_psize == MMU_PAGE_4K)
                 mmu_linear_psize = MMU_PAGE_64K;
-        if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
-                mmu_io_psize = MMU_PAGE_64K;
-        else
+        if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+                /*
+                 * Don't use 64k pages for ioremap on pSeries, since
+                 * that would stop us accessing the HEA ethernet.
+                 */
+                if (!machine_is(pseries))
+                        mmu_io_psize = MMU_PAGE_64K;
+        } else
                 mmu_ci_restrictions = 1;
 }
 #endif /* CONFIG_PPC_64K_PAGES */
arch/powerpc/sysdev/bestcomm/bestcomm.c

@@ -52,6 +52,10 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
         int i, tasknum = -1;
         struct bcom_task *tsk;

+        /* Don't try to do anything if bestcomm init failed */
+        if (!bcom_eng)
+                return NULL;
+
         /* Get and reserve a task num */
         spin_lock(&bcom_eng->lock);

@@ -484,8 +488,8 @@ mpc52xx_bcom_remove(struct of_device *op)
 }

 static struct of_device_id mpc52xx_bcom_of_match[] = {
-        { .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", },
-        { .type = "dma-controller", .compatible = "mpc5200-bestcomm", },
+        { .compatible = "fsl,mpc5200-bestcomm", },
+        { .compatible = "mpc5200-bestcomm", },
         {},
 };
arch/powerpc/sysdev/ipic.c

@@ -906,7 +906,7 @@ static int __init init_ipic_sysfs(void)
 {
         int rc;

-        if (!primary_ipic->regs)
+        if (!primary_ipic || !primary_ipic->regs)
                 return -ENODEV;

         printk(KERN_DEBUG "Registering ipic with sysfs...\n");
arch/sparc64/kernel/ds.c

@@ -972,8 +972,7 @@ static void process_ds_work(void)
         LIST_HEAD(todo);

         spin_lock_irqsave(&ds_lock, flags);
-        list_splice(&ds_work_list, &todo);
-        INIT_LIST_HEAD(&ds_work_list);
+        list_splice_init(&ds_work_list, &todo);
         spin_unlock_irqrestore(&ds_lock, flags);

         list_for_each_entry_safe(qp, tmp, &todo, list) {
arch/sparc64/kernel/head.S

@@ -288,8 +288,12 @@ sun4v_chip_type:
         /* Leave arg2 as-is, prom_mmu_ihandle_cache */
         mov     -1, %l3
         stx     %l3, [%sp + 2047 + 128 + 0x28]  ! arg3: mode (-1 default)
-        sethi   %hi(8 * 1024 * 1024), %l3
-        stx     %l3, [%sp + 2047 + 128 + 0x30]  ! arg4: size (8MB)
+        /* 4MB align the kernel image size.  */
+        set     (_end - KERNBASE), %l3
+        set     ((4 * 1024 * 1024) - 1), %l4
+        add     %l3, %l4, %l3
+        andn    %l3, %l4, %l3
+        stx     %l3, [%sp + 2047 + 128 + 0x30]  ! arg4: roundup(ksize, 4MB)
         sethi   %hi(KERNBASE), %l3
         stx     %l3, [%sp + 2047 + 128 + 0x38]  ! arg5: vaddr (KERNBASE)
         stx     %g0, [%sp + 2047 + 128 + 0x40]  ! arg6: empty
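The new arg4 computation rounds the kernel image size up to a 4 MB multiple with the usual add-then-mask sequence (add the alignment minus one, then clear the low bits with andn). The same idiom written in plain C, purely as an illustration and not part of the commit:

    #include <stdio.h>

    /* Round x up to the next multiple of align, where align is a power of two.
     * Mirrors the head.S sequence: add (align - 1), then mask the low bits off. */
    static unsigned long round_up_pow2(unsigned long x, unsigned long align)
    {
        unsigned long mask = align - 1;
        return (x + mask) & ~mask;
    }

    int main(void)
    {
        /* Example: a 6 MB kernel image rounds up to 8 MB, i.e. two 4 MB mappings. */
        unsigned long ksize = 6UL << 20;
        printf("%lu MB\n", round_up_pow2(ksize, 4UL << 20) >> 20);  /* prints 8 MB */
        return 0;
    }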
arch/sparc64/kernel/process.c

@@ -731,9 +731,6 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
                 current_thread_info()->xfsr[0] = 0;
                 current_thread_info()->fpsaved[0] = 0;
                 regs->tstate &= ~TSTATE_PEF;
-                task_lock(current);
-                current->ptrace &= ~PT_DTRACE;
-                task_unlock(current);
         }
 out:
         return error;
arch/sparc64/kernel/smp.c

@@ -284,14 +284,17 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 {
         extern unsigned long sparc64_ttable_tl0;
         extern unsigned long kern_locked_tte_data;
-        extern int bigkernel;
         struct hvtramp_descr *hdesc;
         unsigned long trampoline_ra;
         struct trap_per_cpu *tb;
         u64 tte_vaddr, tte_data;
         unsigned long hv_err;
+        int i;

-        hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+        hdesc = kzalloc(sizeof(*hdesc) +
+                        (sizeof(struct hvtramp_mapping) *
+                         num_kernel_image_mappings - 1),
+                        GFP_KERNEL);
         if (!hdesc) {
                 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
                        "hvtramp_descr.\n");

@@ -299,7 +302,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
         }

         hdesc->cpu = cpu;
-        hdesc->num_mappings = (bigkernel ? 2 : 1);
+        hdesc->num_mappings = num_kernel_image_mappings;

         tb = &trap_block[cpu];
         tb->hdesc = hdesc;

@@ -312,13 +315,11 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
         tte_vaddr = (unsigned long) KERNBASE;
         tte_data = kern_locked_tte_data;

-        hdesc->maps[0].vaddr = tte_vaddr;
-        hdesc->maps[0].tte   = tte_data;
-        if (bigkernel) {
+        for (i = 0; i < hdesc->num_mappings; i++) {
+                hdesc->maps[i].vaddr = tte_vaddr;
+                hdesc->maps[i].tte   = tte_data;
                 tte_vaddr += 0x400000;
                 tte_data  += 0x400000;
-                hdesc->maps[1].vaddr = tte_vaddr;
-                hdesc->maps[1].tte   = tte_data;
         }

         trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
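ldom_startcpu_cpuid() now sizes the hvtramp descriptor for num_kernel_image_mappings entries even though struct hvtramp_descr only declares maps[1] (see the hvtramp.h hunk further down). A minimal user-space sketch of that trailing-array allocation idiom, with hypothetical stand-in types rather than the kernel's:

    #include <stdlib.h>

    struct mapping { unsigned long vaddr, tte; };

    struct descr {
        unsigned long num_mappings;
        struct mapping maps[1];     /* really num_mappings entries follow in memory */
    };

    static struct descr *descr_alloc(unsigned long n)
    {
        /* maps[1] already accounts for one element, so allocate n - 1 extra,
         * the same shape as the kzalloc() call in the hunk above. */
        struct descr *d = calloc(1, sizeof(*d) + sizeof(struct mapping) * (n - 1));
        if (d)
            d->num_mappings = n;
        return d;
    }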
arch/sparc64/kernel/sys_sparc32.c

@@ -678,9 +678,6 @@ asmlinkage long sparc32_execve(struct pt_regs *regs)
                 current_thread_info()->xfsr[0] = 0;
                 current_thread_info()->fpsaved[0] = 0;
                 regs->tstate &= ~TSTATE_PEF;
-                task_lock(current);
-                current->ptrace &= ~PT_DTRACE;
-                task_unlock(current);
         }
 out:
         return error;
arch/sparc64/kernel/trampoline.S

@@ -105,7 +105,7 @@ startup_continue:
         wr      %g2, 0, %tick_cmpr

         /* Call OBP by hand to lock KERNBASE into i/d tlbs.
-         * We lock 2 consequetive entries if we are 'bigkernel'.
+         * We lock 'num_kernel_image_mappings' consequetive entries.
          */
         sethi   %hi(prom_entry_lock), %g2
 1:      ldstub  [%g2 + %lo(prom_entry_lock)], %g1

@@ -119,6 +119,29 @@ startup_continue:
         add     %l2, -(192 + 128), %sp
         flushw

+        /* Setup the loop variables:
+         * %l3: VADDR base
+         * %l4: TTE base
+         * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
+         * %l6: Number of TTE entries to map
+         * %l7: Highest TTE entry number, we count down
+         */
+        sethi   %hi(KERNBASE), %l3
+        sethi   %hi(kern_locked_tte_data), %l4
+        ldx     [%l4 + %lo(kern_locked_tte_data)], %l4
+        clr     %l5
+        sethi   %hi(num_kernel_image_mappings), %l6
+        lduw    [%l6 + %lo(num_kernel_image_mappings)], %l6
+        add     %l6, 1, %l6
+
+        mov     15, %l7
+        BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+
+        mov     63, %l7
+2:
+
+3:
+        /* Lock into I-MMU */
         sethi   %hi(call_method), %g2
         or      %g2, %lo(call_method), %g2
         stx     %g2, [%sp + 2047 + 128 + 0x00]

@@ -132,63 +155,26 @@ startup_continue:
         sethi   %hi(prom_mmu_ihandle_cache), %g2
         lduw    [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
         stx     %g2, [%sp + 2047 + 128 + 0x20]
-        sethi   %hi(KERNBASE), %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x28]
-        sethi   %hi(kern_locked_tte_data), %g2
-        ldx     [%g2 + %lo(kern_locked_tte_data)], %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x30]
-
-        mov     15, %g2
-        BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-        mov     63, %g2
-1:
+
+        /* Each TTE maps 4MB, convert index to offset.  */
+        sllx    %l5, 22, %g1
+
+        add     %l3, %g1, %g2
+        stx     %g2, [%sp + 2047 + 128 + 0x28]  ! VADDR
+        add     %l4, %g1, %g2
+        stx     %g2, [%sp + 2047 + 128 + 0x30]  ! TTE
+
+        /* TTE index is highest minus loop index.  */
+        sub     %l7, %l5, %g2
         stx     %g2, [%sp + 2047 + 128 + 0x38]
+
         sethi   %hi(p1275buf), %g2
         or      %g2, %lo(p1275buf), %g2
         ldx     [%g2 + 0x08], %o1
         call    %o1
          add    %sp, (2047 + 128), %o0

-        sethi   %hi(bigkernel), %g2
-        lduw    [%g2 + %lo(bigkernel)], %g2
-        brz,pt  %g2, do_dtlb
-         nop
-
-        sethi   %hi(call_method), %g2
-        or      %g2, %lo(call_method), %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x00]
-        mov     5, %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x08]
-        mov     1, %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x10]
-        sethi   %hi(itlb_load), %g2
-        or      %g2, %lo(itlb_load), %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x18]
-        sethi   %hi(prom_mmu_ihandle_cache), %g2
-        lduw    [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x20]
-        sethi   %hi(KERNBASE + 0x400000), %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x28]
-        sethi   %hi(kern_locked_tte_data), %g2
-        ldx     [%g2 + %lo(kern_locked_tte_data)], %g2
-        sethi   %hi(0x400000), %g1
-        add     %g2, %g1, %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x30]
-
-        mov     14, %g2
-        BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-        mov     62, %g2
-1:
-        stx     %g2, [%sp + 2047 + 128 + 0x38]
-        sethi   %hi(p1275buf), %g2
-        or      %g2, %lo(p1275buf), %g2
-        ldx     [%g2 + 0x08], %o1
-        call    %o1
-         add    %sp, (2047 + 128), %o0
-
-do_dtlb:
+        /* Lock into D-MMU */
         sethi   %hi(call_method), %g2
         or      %g2, %lo(call_method), %g2
         stx     %g2, [%sp + 2047 + 128 + 0x00]

@@ -202,65 +188,30 @@ do_dtlb:
         sethi   %hi(prom_mmu_ihandle_cache), %g2
         lduw    [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
         stx     %g2, [%sp + 2047 + 128 + 0x20]
-        sethi   %hi(KERNBASE), %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x28]
-        sethi   %hi(kern_locked_tte_data), %g2
-        ldx     [%g2 + %lo(kern_locked_tte_data)], %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x30]
-
-        mov     15, %g2
-        BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-        mov     63, %g2
-1:
+
+        /* Each TTE maps 4MB, convert index to offset.  */
+        sllx    %l5, 22, %g1
+
+        add     %l3, %g1, %g2
+        stx     %g2, [%sp + 2047 + 128 + 0x28]  ! VADDR
+        add     %l4, %g1, %g2
+        stx     %g2, [%sp + 2047 + 128 + 0x30]  ! TTE
+
+        /* TTE index is highest minus loop index.  */
+        sub     %l7, %l5, %g2
         stx     %g2, [%sp + 2047 + 128 + 0x38]
+
         sethi   %hi(p1275buf), %g2
         or      %g2, %lo(p1275buf), %g2
         ldx     [%g2 + 0x08], %o1
         call    %o1
          add    %sp, (2047 + 128), %o0

-        sethi   %hi(bigkernel), %g2
-        lduw    [%g2 + %lo(bigkernel)], %g2
-        brz,pt  %g2, do_unlock
+        add     %l5, 1, %l5
+        cmp     %l5, %l6
+        bne,pt  %xcc, 3b
          nop
-
-        sethi   %hi(call_method), %g2
-        or      %g2, %lo(call_method), %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x00]
-        mov     5, %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x08]
-        mov     1, %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x10]
-        sethi   %hi(dtlb_load), %g2
-        or      %g2, %lo(dtlb_load), %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x18]
-        sethi   %hi(prom_mmu_ihandle_cache), %g2
-        lduw    [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x20]
-        sethi   %hi(KERNBASE + 0x400000), %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x28]
-        sethi   %hi(kern_locked_tte_data), %g2
-        ldx     [%g2 + %lo(kern_locked_tte_data)], %g2
-        sethi   %hi(0x400000), %g1
-        add     %g2, %g1, %g2
-        stx     %g2, [%sp + 2047 + 128 + 0x30]
-
-        mov     14, %g2
-        BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-        mov     62, %g2
-1:
-        stx     %g2, [%sp + 2047 + 128 + 0x38]
-        sethi   %hi(p1275buf), %g2
-        or      %g2, %lo(p1275buf), %g2
-        ldx     [%g2 + 0x08], %o1
-        call    %o1
-         add    %sp, (2047 + 128), %o0
-
 do_unlock:
         sethi   %hi(prom_entry_lock), %g2
         stb     %g0, [%g2 + %lo(prom_entry_lock)]
         membar  #StoreStore | #StoreLoad

@@ -269,47 +220,36 @@ do_unlock:
          nop

 niagara_lock_tlb:
+        sethi   %hi(KERNBASE), %l3
+        sethi   %hi(kern_locked_tte_data), %l4
+        ldx     [%l4 + %lo(kern_locked_tte_data)], %l4
+        clr     %l5
+        sethi   %hi(num_kernel_image_mappings), %l6
+        lduw    [%l6 + %lo(num_kernel_image_mappings)], %l6
+        add     %l6, 1, %l6
+
+1:
         mov     HV_FAST_MMU_MAP_PERM_ADDR, %o5
-        sethi   %hi(KERNBASE), %o0
+        sllx    %l5, 22, %g2
+        add     %l3, %g2, %o0
         clr     %o1
-        sethi   %hi(kern_locked_tte_data), %o2
-        ldx     [%o2 + %lo(kern_locked_tte_data)], %o2
+        add     %l4, %g2, %o2
         mov     HV_MMU_IMMU, %o3
         ta      HV_FAST_TRAP

         mov     HV_FAST_MMU_MAP_PERM_ADDR, %o5
-        sethi   %hi(KERNBASE), %o0
+        sllx    %l5, 22, %g2
+        add     %l3, %g2, %o0
         clr     %o1
-        sethi   %hi(kern_locked_tte_data), %o2
-        ldx     [%o2 + %lo(kern_locked_tte_data)], %o2
+        add     %l4, %g2, %o2
         mov     HV_MMU_DMMU, %o3
         ta      HV_FAST_TRAP

-        sethi   %hi(bigkernel), %g2
-        lduw    [%g2 + %lo(bigkernel)], %g2
-        brz,pt  %g2, after_lock_tlb
+        add     %l5, 1, %l5
+        cmp     %l5, %l6
+        bne,pt  %xcc, 1b
          nop
-
-        mov     HV_FAST_MMU_MAP_PERM_ADDR, %o5
-        sethi   %hi(KERNBASE + 0x400000), %o0
-        clr     %o1
-        sethi   %hi(kern_locked_tte_data), %o2
-        ldx     [%o2 + %lo(kern_locked_tte_data)], %o2
-        sethi   %hi(0x400000), %o3
-        add     %o2, %o3, %o2
-        mov     HV_MMU_IMMU, %o3
-        ta      HV_FAST_TRAP
-
-        mov     HV_FAST_MMU_MAP_PERM_ADDR, %o5
-        sethi   %hi(KERNBASE + 0x400000), %o0
-        clr     %o1
-        sethi   %hi(kern_locked_tte_data), %o2
-        ldx     [%o2 + %lo(kern_locked_tte_data)], %o2
-        sethi   %hi(0x400000), %o3
-        add     %o2, %o3, %o2
-        mov     HV_MMU_DMMU, %o3
-        ta      HV_FAST_TRAP

 after_lock_tlb:
         wrpr    %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
         wr      %g0, 0, %fprs
arch/sparc64/mm/init.c

@@ -166,7 +166,7 @@ unsigned long sparc64_kern_pri_context __read_mostly;
 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
 unsigned long sparc64_kern_sec_context __read_mostly;

-int bigkernel = 0;
+int num_kernel_image_mappings;

 #ifdef CONFIG_DEBUG_DCFLUSH
 atomic_t dcpage_flushes = ATOMIC_INIT(0);

@@ -572,7 +572,7 @@ static unsigned long kern_large_tte(unsigned long paddr);
 static void __init remap_kernel(void)
 {
         unsigned long phys_page, tte_vaddr, tte_data;
-        int tlb_ent = sparc64_highest_locked_tlbent();
+        int i, tlb_ent = sparc64_highest_locked_tlbent();

         tte_vaddr = (unsigned long) KERNBASE;
         phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;

@@ -582,27 +582,20 @@ static void __init remap_kernel(void)
         /* Now lock us into the TLBs via Hypervisor or OBP. */
         if (tlb_type == hypervisor) {
-                hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
-                hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
-                if (bigkernel) {
-                        tte_vaddr += 0x400000;
-                        tte_data += 0x400000;
+                for (i = 0; i < num_kernel_image_mappings; i++) {
                         hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
                         hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+                        tte_vaddr += 0x400000;
+                        tte_data += 0x400000;
                 }
         } else {
-                prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
-                prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
-                if (bigkernel) {
-                        tlb_ent -= 1;
-                        prom_dtlb_load(tlb_ent,
-                                       tte_data + 0x400000,
-                                       tte_vaddr + 0x400000);
-                        prom_itlb_load(tlb_ent,
-                                       tte_data + 0x400000,
-                                       tte_vaddr + 0x400000);
+                for (i = 0; i < num_kernel_image_mappings; i++) {
+                        prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+                        prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+                        tte_vaddr += 0x400000;
+                        tte_data += 0x400000;
                 }
-                sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+                sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
         }
         if (tlb_type == cheetah_plus) {
                 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |

@@ -1352,12 +1345,9 @@ void __init paging_init(void)
         shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

         real_end = (unsigned long)_end;
-        if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
-                bigkernel = 1;
-        if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
-                prom_printf("paging_init: Kernel > 8MB, too large.\n");
-                prom_halt();
-        }
+        num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+        printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+               num_kernel_image_mappings);

         /* Set kernel pgd to upper alias so physical page computations
          * work.
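paging_init() now sizes the locked-TLB coverage directly from the image: one 4 MB mapping per started 4 MB of kernel, instead of the old bigkernel/8 MB limit. A quick check of the arithmetic, with DIV_ROUND_UP expanded to its usual kernel definition; this snippet is illustrative and not part of the commit:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long four_mb = 1UL << 22;
        /* A 6 MB image needs two 4 MB mappings, a 3 MB image needs one. */
        printf("%lu\n", DIV_ROUND_UP(6UL << 20, four_mb));  /* 2 */
        printf("%lu\n", DIV_ROUND_UP(3UL << 20, four_mb));  /* 1 */
        return 0;
    }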
arch/x86/mm/ioremap.c

@@ -106,7 +106,7 @@ static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                                enum ioremap_mode mode)
 {
         unsigned long pfn, offset, last_addr, vaddr;

@@ -193,13 +193,13 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
         return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
 }
 EXPORT_SYMBOL(ioremap_nocache);

-void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
         return __ioremap(phys_addr, size, IOR_MODE_CACHED);
 }
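The ioremap prototypes here (and in io_32.h, io_64.h and lib/iomap.c below) switch from unsigned long to resource_size_t so that physical addresses wider than 32 bits, for example a PCI BAR above 4 GB on a PAE x86-32 kernel, are not truncated at the call boundary. A rough illustration of the truncation the old prototype allowed, modelling resource_size_t as a 64-bit integer:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bar = 0x100000000ULL;          /* a physical address just above 4 GB */
        uint32_t as_u32 = (uint32_t)bar;        /* what a 32-bit unsigned long would keep */

        /* The truncated value is 0, so an "unsigned long" parameter cannot
         * describe such an address on a 32-bit build. */
        printf("full=%llu truncated=%u\n", (unsigned long long)bar, as_u32);
        return 0;
    }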
drivers/connector/cn_queue.c

@@ -146,7 +146,7 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
         dev->nls = nls;

-        dev->cn_queue = create_workqueue(dev->name);
+        dev->cn_queue = create_singlethread_workqueue(dev->name);
         if (!dev->cn_queue) {
                 kfree(dev);
                 return NULL;
drivers/net/bnx2x.c

@@ -63,8 +63,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"

-#define DRV_MODULE_VERSION      "1.40.22"
-#define DRV_MODULE_RELDATE      "2007/11/27"
+#define DRV_MODULE_VERSION      "1.42.3"
+#define DRV_MODULE_RELDATE      "2008/3/9"
 #define BNX2X_BC_VER            0x040200

 /* Time in jiffies before concluding the transmitter is hung. */

@@ -8008,38 +8008,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
            cmd->duplex, cmd->port, cmd->phy_address,
            cmd->transceiver, cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

-        switch (cmd->port) {
-        case PORT_TP:
-                if (!(bp->supported & SUPPORTED_TP)) {
-                        DP(NETIF_MSG_LINK, "TP not supported\n");
-                        return -EINVAL;
-                }
-
-                if (bp->phy_flags & PHY_XGXS_FLAG) {
-                        bnx2x_link_reset(bp);
-                        bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
-                        bnx2x_phy_deassert(bp);
-                }
-                break;
-
-        case PORT_FIBRE:
-                if (!(bp->supported & SUPPORTED_FIBRE)) {
-                        DP(NETIF_MSG_LINK, "FIBRE not supported\n");
-                        return -EINVAL;
-                }
-
-                if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
-                        bnx2x_link_reset(bp);
-                        bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
-                        bnx2x_phy_deassert(bp);
-                }
-                break;
-
-        default:
-                DP(NETIF_MSG_LINK, "Unknown port type\n");
-                return -EINVAL;
-        }
-
         if (cmd->autoneg == AUTONEG_ENABLE) {
                 if (!(bp->supported & SUPPORTED_Autoneg)) {
                         DP(NETIF_MSG_LINK, "Aotoneg not supported\n");
drivers/net/fec_mpc52xx_phy.c

@@ -109,6 +109,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
                 int irq = irq_of_parse_and_map(child, 0);
                 if (irq != NO_IRQ) {
                         const u32 *id = of_get_property(child, "reg", NULL);
-                        bus->irq[*id] = irq;
+                        if (id)
+                                bus->irq[*id] = irq;
                 }
         }
drivers/net/sungem.c

@@ -912,7 +912,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
                  * rx ring - must call napi_disable(), which
                  * schedule_timeout()'s if polling is already disabled.
                  */
-                work_done += gem_rx(gp, budget);
+                work_done += gem_rx(gp, budget - work_done);

                 if (work_done >= budget)
                         return work_done;
include/asm-sparc64/hvtramp.h

@@ -16,7 +16,7 @@ struct hvtramp_descr {
         __u64                   fault_info_va;
         __u64                   fault_info_pa;
         __u64                   thread_reg;
-        struct hvtramp_mapping  maps[2];
+        struct hvtramp_mapping  maps[1];
 };

 extern void hv_cpu_startup(unsigned long hvdescr_pa);
include/asm-sparc64/spitfire.h

@@ -63,6 +63,8 @@ extern void cheetah_enable_pcache(void);
                          SPITFIRE_HIGHEST_LOCKED_TLBENT : \
                          CHEETAH_HIGHEST_LOCKED_TLBENT)

+extern int num_kernel_image_mappings;
+
 /* The data cache is write through, so this just invalidates the
  * specified line.
  */
include/asm-x86/io_32.h

@@ -114,13 +114,13 @@ static inline void * phys_to_virt(unsigned long address)
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);

 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
         return ioremap_nocache(offset, size);
 }
include/asm-x86/io_64.h

@@ -158,13 +158,13 @@ extern void early_iounmap(void *addr, unsigned long size);
  * it's useful if some control registers are in such an area and write combining
  * or read caching is not desirable:
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);

 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
         return ioremap_nocache(offset, size);
 }
include/net/sctp/sctp.h

@@ -389,7 +389,7 @@ void sctp_v6_del_protocol(void);

 #else /* #ifdef defined(CONFIG_IPV6) */

-static inline void sctp_v6_pf_init(void) { return 0; }
+static inline void sctp_v6_pf_init(void) { return; }
 static inline void sctp_v6_pf_exit(void) { return; }
 static inline int sctp_v6_protosw_init(void) { return 0; }
 static inline void sctp_v6_protosw_exit(void) { return; }
kernel/time/timekeeping.c

@@ -191,8 +191,12 @@ static void change_clocksource(void)

         tick_clock_notify();

+        /*
+         * We're holding xtime lock and waking up klogd would deadlock
+         * us on enqueue.  So no printing!
         printk(KERN_INFO "Time: %s clocksource has been installed.\n",
                clock->name);
+         */
 }
 #else
 static inline void change_clocksource(void) { }
lib/iomap.c

@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioport_unmap);
  * */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-        unsigned long start = pci_resource_start(dev, bar);
+        resource_size_t start = pci_resource_start(dev, bar);
         unsigned long len = pci_resource_len(dev, bar);
         unsigned long flags = pci_resource_flags(dev, bar);
net/9p/trans_fd.c

@@ -861,7 +861,6 @@ static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
 static void p9_mux_flush_cb(struct p9_req *freq, void *a)
 {
         p9_conn_req_callback cb;
-        int tag;
         struct p9_conn *m;
         struct p9_req *req, *rreq, *rptr;

@@ -872,7 +871,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a)
                 freq->tcall->params.tflush.oldtag);

         spin_lock(&m->lock);
         cb = NULL;
-        tag = freq->tcall->params.tflush.oldtag;
         req = NULL;
         list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
net/atm/clip.c

@@ -947,6 +947,8 @@ static const struct file_operations arp_seq_fops = {
 };
 #endif

+static void atm_clip_exit_noproc(void);
+
 static int __init atm_clip_init(void)
 {
         neigh_table_init_no_netlink(&clip_tbl);

@@ -963,18 +965,22 @@ static int __init atm_clip_init(void)
                 struct proc_dir_entry *p;

                 p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
+                if (!p) {
+                        printk(KERN_ERR "Unable to initialize "
+                               "/proc/net/atm/arp\n");
+                        atm_clip_exit_noproc();
+                        return -ENOMEM;
+                }
         }
 #endif

         return 0;
 }

-static void __exit atm_clip_exit(void)
+static void atm_clip_exit_noproc(void)
 {
         struct net_device *dev, *next;

-        remove_proc_entry("arp", atm_proc_root);
-
         unregister_inetaddr_notifier(&clip_inet_notifier);
         unregister_netdevice_notifier(&clip_dev_notifier);

@@ -1005,6 +1011,13 @@ static void __exit atm_clip_exit(void)
         clip_tbl_hook = NULL;
 }

+static void __exit atm_clip_exit(void)
+{
+        remove_proc_entry("arp", atm_proc_root);
+
+        atm_clip_exit_noproc();
+}
+
 module_init(atm_clip_init);
 module_exit(atm_clip_exit);
 MODULE_AUTHOR("Werner Almesberger");
net/atm/lec.c

@@ -1250,6 +1250,10 @@ static int __init lane_module_init(void)
         struct proc_dir_entry *p;

         p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
+        if (!p) {
+                printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n");
+                return -ENOMEM;
+        }
 #endif

         register_atm_ioctl(&lane_ioctl_ops);
net/ipv4/fib_trie.c

@@ -177,10 +177,13 @@ static inline struct tnode *node_parent_rcu(struct node *node)
         return rcu_dereference(ret);
 }

+/* Same as rcu_assign_pointer
+ * but that macro() assumes that value is a pointer.
+ */
 static inline void node_set_parent(struct node *node, struct tnode *ptr)
 {
-        rcu_assign_pointer(node->parent, (unsigned long)ptr |
-                           NODE_TYPE(node));
+        smp_wmb();
+        node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }

 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
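node->parent here is a tagged unsigned long (pointer bits OR'ed with the node type), not a plain pointer, which is why rcu_assign_pointer() is replaced by an explicit write barrier followed by a plain store. A hedged C11 sketch of the same publish ordering, with illustrative names rather than the kernel's:

    #include <stdatomic.h>
    #include <stdint.h>

    struct tnode { int dummy; };

    struct node_hdr {
        _Atomic uintptr_t parent;   /* tagged word: pointer bits | type bit */
    };

    static inline void publish_parent(struct node_hdr *n, struct tnode *ptr,
                                      uintptr_t type_bit)
    {
        /* The release store plays the role of smp_wmb() plus assignment:
         * everything written to *ptr beforehand is visible to readers that
         * subsequently load n->parent and follow it. */
        atomic_store_explicit(&n->parent, (uintptr_t)ptr | type_bit,
                              memory_order_release);
    }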
net/ipv4/ip_fragment.c

@@ -568,7 +568,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
         IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);

-        net = skb->dev->nd_net;
+        net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net;
         /* Start by cleaning up the memory. */
         if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
                 ip_evictor(net);
net/ipv4/tcp.c

@@ -735,7 +735,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
                 if (!(psize -= copy))
                         goto out;

-                if (skb->len < mss_now || (flags & MSG_OOB))
+                if (skb->len < size_goal || (flags & MSG_OOB))
                         continue;

                 if (forced_push(tp)) {

@@ -981,7 +981,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
                         if ((seglen -= copy) == 0 && iovlen == 0)
                                 goto out;

-                        if (skb->len < mss_now || (flags & MSG_OOB))
+                        if (skb->len < size_goal || (flags & MSG_OOB))
                                 continue;

                         if (forced_push(tp)) {
net/ipv6/ndisc.c

@@ -1420,7 +1420,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
         u8 *opt;
         int rd_len;
         int err;
-        int hlen;
         u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;

         dev = skb->dev;

@@ -1491,7 +1490,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
                 return;
         }

-        hlen = 0;

         skb_reserve(buff, LL_RESERVED_SPACE(dev));
         ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,
net/sched/sch_htb.c

@@ -711,9 +711,11 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  */
 static psched_time_t htb_do_events(struct htb_sched *q, int level)
 {
-        int i;
-
-        for (i = 0; i < 500; i++) {
+        /* don't run for longer than 2 jiffies; 2 is used instead of
+           1 to simplify things when jiffy is going to be incremented
+           too soon */
+        unsigned long stop_at = jiffies + 2;
+        while (time_before(jiffies, stop_at)) {
                 struct htb_class *cl;
                 long diff;
                 struct rb_node *p = rb_first(&q->wait_pq[level]);

@@ -731,9 +733,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
                 if (cl->cmode != HTB_CAN_SEND)
                         htb_add_to_wait_tree(q, cl, diff);
         }
-        if (net_ratelimit())
-                printk(KERN_WARNING "htb: too many events !\n");
-        return q->now + PSCHED_TICKS_PER_SEC / 10;
+        /* too much load - let's continue on next jiffie */
+        return q->now + PSCHED_TICKS_PER_SEC / HZ;
 }

 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
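htb_do_events() now bounds its work by elapsed time rather than by a fixed 500 iterations, using the wrap-safe time_before(jiffies, stop_at) test. A small self-contained sketch of that pattern with a simulated tick counter, assuming time_before() is the usual signed-difference comparison:

    #include <stdbool.h>
    #include <stdio.h>

    /* Wrap-safe "is a before b", the same trick as the kernel's time_before():
     * the signed difference stays correct across counter wraparound. */
    static bool before(unsigned long a, unsigned long b)
    {
        return (long)(a - b) < 0;
    }

    int main(void)
    {
        unsigned long jiffies = 0;              /* simulated; the real code reads jiffies */
        unsigned long stop_at = jiffies + 2;    /* work for at most ~2 ticks */
        int events = 0;

        while (before(jiffies, stop_at)) {
            events++;       /* process one pending event */
            jiffies++;      /* pretend a tick elapsed */
        }
        /* If work remains, the caller simply resumes on the next jiffy. */
        printf("processed %d events before the deadline\n", events);
        return 0;
    }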
net/socket.c

@@ -909,11 +909,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                 if (!dlci_ioctl_hook)
                         request_module("dlci");

-                if (dlci_ioctl_hook) {
-                        mutex_lock(&dlci_ioctl_mutex);
+                mutex_lock(&dlci_ioctl_mutex);
+                if (dlci_ioctl_hook)
                         err = dlci_ioctl_hook(cmd, argp);
-                        mutex_unlock(&dlci_ioctl_mutex);
-                }
+                mutex_unlock(&dlci_ioctl_mutex);
                 break;
         default:
                 err = sock->ops->ioctl(sock, cmd, arg);
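The sock_ioctl() change takes dlci_ioctl_mutex before testing dlci_ioctl_hook, so the hook can no longer be unregistered between the check and the call. A minimal user-space sketch of the same check-under-lock pattern (the pthread names and error value are illustrative, not net/socket.c's):

    #include <pthread.h>

    static pthread_mutex_t hook_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int (*hook)(unsigned int cmd, void *arg);  /* set/cleared by another thread */

    static int call_hook(unsigned int cmd, void *arg)
    {
        int err = -1;                   /* "no handler registered" result */

        pthread_mutex_lock(&hook_mutex);
        if (hook)                       /* tested under the lock, so the hook cannot */
            err = hook(cmd, arg);       /* be torn down while we are calling it      */
        pthread_mutex_unlock(&hook_mutex);
        return err;
    }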
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

@@ -237,14 +237,12 @@ static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt,

 static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
 {
-#ifdef RDMA_TRANSPORT_IWARP
         if ((RDMA_TRANSPORT_IWARP ==
              rdma_node_get_transport(xprt->sc_cm_id->
                                      device->node_type))
             && sge_count > 1)
                 return 1;
         else
-#endif
                 return min_t(int, sge_count, xprt->sc_max_sge);
 }