Commit 868f24fc
authored Jun 05, 2002 by Linus Torvalds
Merge master.kernel.org:/home/davem/BK/misc-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
Parents: a9274e96 1cd013d0
Showing 21 changed files with 2309 additions and 2239 deletions
CREDITS                             +4    -0
arch/sparc/kernel/devices.c         +28   -29
arch/sparc/mm/sun4c.c               +71   -460
arch/sparc64/kernel/Makefile        +0    -5
arch/sparc64/kernel/sys_sparc32.c   +25   -16
arch/sparc64/mm/generic.c           +3    -13
drivers/net/bonding.c               +266  -29
include/asm-sparc/btfixup.h         +16   -27
include/asm-sparc/page.h            +2    -2
include/asm-sparc/ultra.h           +0    -52
include/asm-sparc/vac-ops.h         +0    -2
include/asm-sparc64/page.h          +3    -2
include/linux/if_bonding.h          +4    -1
include/net/pkt_sched.h             +2    -1
net/core/dev.c                      +414  -403
net/ipv4/af_inet.c                  +173  -181
net/ipv4/devinet.c                  +475  -367
net/ipv4/icmp.c                     +454  -295
net/ipv4/tcp_ipv4.c                 +349  -351
net/ipv6/raw.c                      +19   -2
net/sched/sch_api.c                 +1    -1
CREDITS
@@ -2779,6 +2779,10 @@ N: Christopher Smith
 E: x@xman.org
 D: Tulip net driver hacker
 
+N: Mark Smith
+E: mark.smith@comdev.cc
+D: Multicast support in bonding driver
+
 N: Miquel van Smoorenburg
 E: miquels@cistron.nl
 D: Kernel and net hacker. Sysvinit, minicom. doing Debian stuff.
arch/sparc/kernel/devices.c
@@ -22,8 +22,8 @@ extern void cpu_probe(void);
 extern void clock_stop_probe(void); /* tadpole.c */
 extern void sun4c_probe_memerr_reg(void);
 
-unsigned long __init device_scan(unsigned long mem_start)
+void __init device_scan(void)
 {
 	char node_str[128];
 	int thismid;
@@ -37,46 +37,45 @@ device_scan(unsigned long mem_start)
 	int scan;
 
 	scan = prom_getchild(prom_root_node);
 	/* One can look it up in PROM instead */
-	/* prom_printf("root child is %08lx\n", (unsigned long) scan); */
 	while ((scan = prom_getsibling(scan)) != 0) {
 		prom_getstring(scan, "device_type",
 			       node_str, sizeof(node_str));
 		if (strcmp(node_str, "cpu") == 0) {
 			linux_cpus[linux_num_cpus].prom_node = scan;
 			prom_getproperty(scan, "mid", (char *) &thismid, sizeof(thismid));
 			linux_cpus[linux_num_cpus].mid = thismid;
-			/* prom_printf("Found CPU %d <node=%08lx,mid=%d>\n", linux_num_cpus, (unsigned long) scan, thismid); */
+			printk("Found CPU %d <node=%08lx,mid=%d>\n",
+			       linux_num_cpus, (unsigned long) scan, thismid);
 			linux_num_cpus++;
 		}
 	}
-	if (linux_num_cpus == 0) {
-		if (sparc_cpu_model == sun4d) {
+	if (linux_num_cpus == 0 && sparc_cpu_model == sun4d) {
 		scan = prom_getchild(prom_root_node);
 		for (scan = prom_searchsiblings(scan, "cpu-unit"); scan;
 		     scan = prom_searchsiblings(prom_getsibling(scan), "cpu-unit")) {
 			int node = prom_getchild(scan);
 
 			prom_getstring(node, "device_type", node_str, sizeof(node_str));
 			if (strcmp(node_str, "cpu") == 0) {
 				prom_getproperty(node, "cpu-id", (char *) &thismid, sizeof(thismid));
 				linux_cpus[linux_num_cpus].prom_node = node;
 				linux_cpus[linux_num_cpus].mid = thismid;
-				/* prom_printf("Found CPU %d <node=%08lx,mid=%d>\n",
-				   linux_num_cpus, (unsigned long) node, thismid); */
+				printk("Found CPU %d <node=%08lx,mid=%d>\n",
+				       linux_num_cpus, (unsigned long) node, thismid);
 				linux_num_cpus++;
 			}
 		}
-		}
 	}
 	if (linux_num_cpus == 0) {
 		printk("No CPU nodes found, cannot continue.\n");
 		/* Probably a sun4e, Sun is trying to trick us ;-) */
 		halt();
 	}
 	printk("Found %d CPU prom device tree node(s).\n", linux_num_cpus);
-	};
+	}
 
 	cpu_probe();
 #ifdef CONFIG_SUN_AUXIO
@@ -92,5 +91,5 @@ device_scan(unsigned long mem_start)
 	if (ARCH_SUN4C_SUN4)
 		sun4c_probe_memerr_reg();
 
-	return mem_start;
+	return;
 }
arch/sparc/mm/sun4c.c
@@ -35,6 +35,7 @@
 #include <asm/mmu_context.h>
 #include <asm/sun4paddr.h>
 #include <asm/highmem.h>
+#include <asm/btfixup.h>
 
 /* Because of our dynamic kernel TLB miss strategy, and how
  * our DVMA mapping allocation works, you _MUST_:
@@ -63,25 +64,17 @@ extern unsigned long page_kernel;
 #define SUN4C_KERNEL_BUCKETS 32
 
-#ifndef MAX
-#define MAX(a,b) ((a)<(b)?(b):(a))
-#endif
-#ifndef MIN
-#define MIN(a,b) ((a)<(b)?(a):(b))
-#endif
-
 /* Flushing the cache. */
 struct sun4c_vac_props sun4c_vacinfo;
 unsigned long sun4c_kernel_faults;
 
 /* Invalidate every sun4c cache line tag. */
-void sun4c_flush_all(void)
+static void __init sun4c_flush_all(void)
 {
 	unsigned long begin, end;
 
 	if (sun4c_vacinfo.on)
-		panic("SUN4C: AIEEE, trying to invalidate vac while"
-		      " it is on.");
+		panic("SUN4C: AIEEE, trying to invalidate vac while it is on.");
 
 	/* Clear 'valid' bit in all cache line tags */
 	begin = AC_CACHETAGS;
@@ -93,7 +86,7 @@ void sun4c_flush_all(void)
 	}
 }
 
-static __inline__ void sun4c_flush_context_hw(void)
+static void sun4c_flush_context_hw(void)
 {
 	unsigned long end = SUN4C_VAC_SIZE;
@@ -122,8 +115,17 @@ static void sun4c_flush_segment_hw(unsigned long addr)
 	}
 }
 
+/* File local boot time fixups. */
+BTFIXUPDEF_CALL(void, sun4c_flush_page, unsigned long)
+BTFIXUPDEF_CALL(void, sun4c_flush_segment, unsigned long)
+BTFIXUPDEF_CALL(void, sun4c_flush_context, void)
+
+#define sun4c_flush_page(addr) BTFIXUP_CALL(sun4c_flush_page)(addr)
+#define sun4c_flush_segment(addr) BTFIXUP_CALL(sun4c_flush_segment)(addr)
+#define sun4c_flush_context() BTFIXUP_CALL(sun4c_flush_context)()
+
 /* Must be called minimally with interrupts disabled. */
-static __inline__ void sun4c_flush_page_hw(unsigned long addr)
+static void sun4c_flush_page_hw(unsigned long addr)
 {
 	addr &= PAGE_MASK;
 	if ((int) sun4c_get_pte(addr) < 0)
@@ -195,48 +197,6 @@ static void sun4c_flush_segment_sw(unsigned long addr)
 	}
 }
 
-/* Bolix one page from the virtual cache. */
-static void sun4c_flush_page(unsigned long addr)
-{
-	addr &= PAGE_MASK;
-
-	if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) !=
-	    _SUN4C_PAGE_VALID)
-		return;
-
-	if (sun4c_vacinfo.do_hwflushes) {
-		__asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : :
-				     "r" (addr), "i" (ASI_HWFLUSHPAGE));
-	} else {
-		unsigned long left = PAGE_SIZE;
-		unsigned long lsize = sun4c_vacinfo.linesize;
-
-		__asm__ __volatile__("add %2, %2, %%g1\n\t"
-				     "add %2, %%g1, %%g2\n\t"
-				     "add %2, %%g2, %%g3\n\t"
-				     "add %2, %%g3, %%g4\n\t"
-				     "add %2, %%g4, %%g5\n\t"
-				     "add %2, %%g5, %%o4\n\t"
-				     "add %2, %%o4, %%o5\n"
-				     "1:\n\t"
-				     "subcc %1, %%o5, %1\n\t"
-				     "sta %%g0, [%0] %6\n\t"
-				     "sta %%g0, [%0 + %2] %6\n\t"
-				     "sta %%g0, [%0 + %%g1] %6\n\t"
-				     "sta %%g0, [%0 + %%g2] %6\n\t"
-				     "sta %%g0, [%0 + %%g3] %6\n\t"
-				     "sta %%g0, [%0 + %%g4] %6\n\t"
-				     "sta %%g0, [%0 + %%g5] %6\n\t"
-				     "sta %%g0, [%0 + %%o4] %6\n\t"
-				     "bg 1b\n\t"
-				     " add %0, %%o5, %0\n\t"
-				     : "=&r" (addr), "=&r" (left), "=&r" (lsize)
-				     : "0" (addr), "1" (left), "2" (lsize),
-				       "i" (ASI_FLUSHPG)
-				     : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
-	}
-}
-
 /* Don't inline the software version as it eats too many cache lines if expanded. */
 static void sun4c_flush_page_sw(unsigned long addr)
 {
@@ -387,7 +347,8 @@ void __init sun4c_probe_vac(void)
 			prom_getintdefault(prom_root_node, "vac_hwflush", 0);
 
 		if (sun4c_vacinfo.num_bytes != 65536) {
-			prom_printf("WEIRD Sun4C VAC cache size, tell davem");
+			prom_printf("WEIRD Sun4C VAC cache size, "
+				    "tell sparclinux@vger.kernel.org");
 			prom_halt();
 		}
 	}
@@ -427,7 +388,7 @@ extern unsigned long vac_hwflush_patch2, vac_hwflush_patch2_on;
 	*daddr = *iaddr; \
 } while (0)
 
-static void patch_kernel_fault_handler(void)
+static void __init patch_kernel_fault_handler(void)
 {
 	unsigned long *iaddr, *daddr;
@@ -459,10 +420,6 @@ static void patch_kernel_fault_handler(void)
 	case 16:
 		PATCH_INSN(num_context_patch1_16,
 			   num_context_patch1);
-#if 0
-		PATCH_INSN(num_context_patch2_16,
-			   num_context_patch2);
-#endif
 		break;
 	default:
 		prom_printf("Unhandled number of contexts: %d\n",
@@ -867,7 +824,7 @@ static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
 #define sun4c_user_unmap(__entry) \
 	sun4c_put_segmap((__entry)->vaddr, invalid_segment)
 
-static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx)
+static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
 {
 	struct sun4c_mmu_entry *head = &crp->ringhd;
 	unsigned long flags;
@@ -879,7 +836,7 @@ static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx
 		flush_user_windows();
 		sun4c_set_context(ctx);
-		sun4c_flush_context_hw();
+		sun4c_flush_context();
 		do {
 			struct sun4c_mmu_entry *next = entry->next;
@@ -893,34 +850,8 @@ static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx
 	restore_flags(flags);
 }
 
-static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx)
-{
-	struct sun4c_mmu_entry *head = &crp->ringhd;
-	unsigned long flags;
-
-	save_and_cli(flags);
-	if (head->next != head) {
-		struct sun4c_mmu_entry *entry = head->next;
-		int savectx = sun4c_get_context();
-
-		flush_user_windows();
-		sun4c_set_context(ctx);
-		sun4c_flush_context_sw();
-		do {
-			struct sun4c_mmu_entry *next = entry->next;
-
-			sun4c_user_unmap(entry);
-			free_user_entry(ctx, entry);
-
-			entry = next;
-		} while (entry != head);
-		sun4c_set_context(savectx);
-	}
-	restore_flags(flags);
-}
-
-static int sun4c_user_taken_entries = 0;  /* This is how much we have.             */
-static int max_user_taken_entries = 0;    /* This limits us and prevents deadlock. */
+static int sun4c_user_taken_entries;  /* This is how much we have.             */
+static int max_user_taken_entries;    /* This limits us and prevents deadlock. */
 
 static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
 {
@@ -934,10 +865,7 @@ static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
 	/* Else free one up. */
 	this_entry = sun4c_kernel_ring.ringhd.prev;
-	if (sun4c_vacinfo.do_hwflushes)
-		sun4c_flush_segment_hw(this_entry->vaddr);
-	else
-		sun4c_flush_segment_sw(this_entry->vaddr);
+	sun4c_flush_segment(this_entry->vaddr);
 	sun4c_kernel_unmap(this_entry);
 	free_kernel_entry(this_entry, &sun4c_kernel_ring);
 	this_entry = sun4c_kfree_ring.ringhd.next;
@@ -976,10 +904,7 @@ static struct sun4c_mmu_entry *sun4c_user_strategy(void)
 	savectx = sun4c_get_context();
 	flush_user_windows();
 	sun4c_set_context(ctx);
-	if (sun4c_vacinfo.do_hwflushes)
-		sun4c_flush_segment_hw(entry->vaddr);
-	else
-		sun4c_flush_segment_sw(entry->vaddr);
+	sun4c_flush_segment(entry->vaddr);
 	sun4c_user_unmap(entry);
 	remove_ring(sun4c_context_ring + ctx, entry);
 	remove_lru(entry);
@@ -1068,10 +993,7 @@ static void free_locked_segment(unsigned long addr)
 	entry = &mmu_entry_pool[pseg];
 
 	flush_user_windows();
-	if (sun4c_vacinfo.do_hwflushes)
-		sun4c_flush_segment_hw(addr);
-	else
-		sun4c_flush_segment_sw(addr);
+	sun4c_flush_segment(addr);
 	sun4c_kernel_unmap(entry);
 	add_ring(&sun4c_ufree_ring, entry);
 	max_user_taken_entries++;
@@ -1126,17 +1048,10 @@ static struct task_struct *sun4c_alloc_task_struct(void)
 	/* We are changing the virtual color of the page(s)
 	 * so we must flush the cache to guarentee consistancy.
 	 */
-	if (sun4c_vacinfo.do_hwflushes) {
-		sun4c_flush_page_hw(pages);
+	sun4c_flush_page(pages);
 #ifndef CONFIG_SUN4
-		sun4c_flush_page_hw(pages + PAGE_SIZE);
+	sun4c_flush_page(pages + PAGE_SIZE);
 #endif
-	} else {
-		sun4c_flush_page_sw(pages);
-#ifndef CONFIG_SUN4
-		sun4c_flush_page_sw(pages + PAGE_SIZE);
-#endif
-	}
 
 	sun4c_put_pte(addr, BUCKET_PTE(pages));
 #ifndef CONFIG_SUN4
@@ -1145,7 +1060,7 @@ static struct task_struct *sun4c_alloc_task_struct(void)
 	return (struct task_struct *) addr;
 }
 
-static void sun4c_free_task_struct_hw(struct task_struct *tsk)
+static void sun4c_free_task_struct(struct task_struct *tsk)
 {
 	unsigned long tsaddr = (unsigned long) tsk;
 	unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tsaddr));
@@ -1153,34 +1068,9 @@ static void sun4c_free_task_struct_hw(struct task_struct *tsk)
 	if (atomic_dec_and_test(&(tsk)->thread.refcount)) {
 		/* We are deleting a mapping, so the flush here is mandatory. */
-		sun4c_flush_page_hw(tsaddr);
+		sun4c_flush_page(tsaddr);
 #ifndef CONFIG_SUN4
-		sun4c_flush_page_hw(tsaddr + PAGE_SIZE);
+		sun4c_flush_page(tsaddr + PAGE_SIZE);
 #endif
-		sun4c_put_pte(tsaddr, 0);
-#ifndef CONFIG_SUN4
-		sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
-#endif
-		sun4c_bucket[entry] = BUCKET_EMPTY;
-		if (entry < sun4c_lowbucket_avail)
-			sun4c_lowbucket_avail = entry;
-
-		free_pages(pages, TASK_STRUCT_ORDER);
-		garbage_collect(entry);
-	}
-}
-
-static void sun4c_free_task_struct_sw(struct task_struct *tsk)
-{
-	unsigned long tsaddr = (unsigned long) tsk;
-	unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tsaddr));
-	int entry = BUCKET_NUM(tsaddr);
-
-	if (atomic_dec_and_test(&(tsk)->thread.refcount)) {
-		/* We are deleting a mapping, so the flush here is mandatory. */
-		sun4c_flush_page_sw(tsaddr);
-#ifndef CONFIG_SUN4
-		sun4c_flush_page_sw(tsaddr + PAGE_SIZE);
-#endif
 		sun4c_put_pte(tsaddr, 0);
 #ifndef CONFIG_SUN4
@@ -1452,131 +1342,7 @@ static void sun4c_flush_cache_all(void)
 	}
 }
 
-static void sun4c_flush_cache_mm_hw(struct mm_struct *mm)
-{
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		flush_user_windows();
-
-		if (sun4c_context_ring[new_ctx].num_entries) {
-			struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
-			unsigned long flags;
-
-			save_and_cli(flags);
-			if (head->next != head) {
-				struct sun4c_mmu_entry *entry = head->next;
-				int savectx = sun4c_get_context();
-
-				sun4c_set_context(new_ctx);
-				sun4c_flush_context_hw();
-				do {
-					struct sun4c_mmu_entry *next = entry->next;
-
-					sun4c_user_unmap(entry);
-					free_user_entry(new_ctx, entry);
-
-					entry = next;
-				} while (entry != head);
-				sun4c_set_context(savectx);
-			}
-			restore_flags(flags);
-		}
-	}
-}
-
-static void sun4c_flush_cache_range_hw(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
-		struct sun4c_mmu_entry *entry;
-		unsigned long flags;
-
-		flush_user_windows();
-
-		save_and_cli(flags);
-
-		/* All user segmap chains are ordered on entry->vaddr. */
-		for (entry = head->next;
-		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
-		     entry = entry->next)
-			;
-
-		/* Tracing various job mixtures showed that this conditional
-		 * only passes ~35% of the time for most worse case situations,
-		 * therefore we avoid all of this gross overhead ~65% of the time.
-		 */
-		if ((entry != head) && (entry->vaddr < end)) {
-			int octx = sun4c_get_context();
-
-			sun4c_set_context(new_ctx);
-
-			/* At this point, always, (start >= entry->vaddr) and
-			 * (entry->vaddr < end), once the latter condition
-			 * ceases to hold, or we hit the end of the list, we
-			 * exit the loop.  The ordering of all user allocated
-			 * segmaps makes this all work out so beautifully.
-			 */
-			do {
-				struct sun4c_mmu_entry *next = entry->next;
-				unsigned long realend;
-
-				/* "realstart" is always >= entry->vaddr */
-				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
-				if (end < realend)
-					realend = end;
-				if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
-					unsigned long page = entry->vaddr;
-
-					while (page < realend) {
-						sun4c_flush_page_hw(page);
-						page += PAGE_SIZE;
-					}
-				} else {
-					sun4c_flush_segment_hw(entry->vaddr);
-					sun4c_user_unmap(entry);
-					free_user_entry(new_ctx, entry);
-				}
-
-				entry = next;
-			} while ((entry != head) && (entry->vaddr < end));
-			sun4c_set_context(octx);
-		}
-		restore_flags(flags);
-	}
-}
-
-static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int new_ctx = mm->context;
-
-	/* Sun4c has no separate I/D caches so cannot optimize for non
-	 * text page flushes.
-	 */
-	if (new_ctx != NO_CONTEXT) {
-		int octx = sun4c_get_context();
-		unsigned long flags;
-
-		flush_user_windows();
-		save_and_cli(flags);
-		sun4c_set_context(new_ctx);
-		sun4c_flush_page_hw(page);
-		sun4c_set_context(octx);
-		restore_flags(flags);
-	}
-}
-
-static void sun4c_flush_page_to_ram_hw(unsigned long page)
-{
-	unsigned long flags;
-
-	save_and_cli(flags);
-	sun4c_flush_page_hw(page);
-	restore_flags(flags);
-}
-
-static void sun4c_flush_cache_mm_sw(struct mm_struct *mm)
+static void sun4c_flush_cache_mm(struct mm_struct *mm)
 {
 	int new_ctx = mm->context;
@@ -1593,7 +1359,7 @@ static void sun4c_flush_cache_mm_sw(struct mm_struct *mm)
 				int savectx = sun4c_get_context();
 
 				sun4c_set_context(new_ctx);
-				sun4c_flush_context_sw();
+				sun4c_flush_context();
 				do {
 					struct sun4c_mmu_entry *next = entry->next;
@@ -1653,11 +1419,11 @@ static void sun4c_flush_cache_range_sw(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 				if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
 					unsigned long page = entry->vaddr;
 
 					while (page < realend) {
-						sun4c_flush_page_sw(page);
+						sun4c_flush_page(page);
 						page += PAGE_SIZE;
 					}
 				} else {
-					sun4c_flush_segment_sw(entry->vaddr);
+					sun4c_flush_segment(entry->vaddr);
 					sun4c_user_unmap(entry);
 					free_user_entry(new_ctx, entry);
 				}
@@ -1669,7 +1435,7 @@ static void sun4c_flush_cache_range_sw(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 	}
 }
 
-static void sun4c_flush_cache_page_sw(struct vm_area_struct *vma, unsigned long page)
+static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int new_ctx = mm->context;
@@ -1684,18 +1450,18 @@ static void sun4c_flush_cache_page_sw(struct vm_area_struct *vma, unsigned long page)
 		flush_user_windows();
 		save_and_cli(flags);
 		sun4c_set_context(new_ctx);
-		sun4c_flush_page_sw(page);
+		sun4c_flush_page(page);
 		sun4c_set_context(octx);
 		restore_flags(flags);
 	}
 }
 
-static void sun4c_flush_page_to_ram_sw(unsigned long page)
+static void sun4c_flush_page_to_ram(unsigned long page)
 {
 	unsigned long flags;
 
 	save_and_cli(flags);
-	sun4c_flush_page_sw(page);
+	sun4c_flush_page(page);
 	restore_flags(flags);
 }
@@ -1723,10 +1489,7 @@ static void sun4c_flush_tlb_all(void)
 	flush_user_windows();
 	while (sun4c_kernel_ring.num_entries) {
 		next_entry = this_entry->next;
-		if (sun4c_vacinfo.do_hwflushes)
-			sun4c_flush_segment_hw(this_entry->vaddr);
-		else
-			sun4c_flush_segment_sw(this_entry->vaddr);
+		sun4c_flush_segment(this_entry->vaddr);
 		for (ctx = 0; ctx < num_contexts; ctx++) {
 			sun4c_set_context(ctx);
 			sun4c_put_segmap(this_entry->vaddr, invalid_segment);
@@ -1738,91 +1501,7 @@ static void sun4c_flush_tlb_all(void)
 	restore_flags(flags);
 }
 
-static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm)
-{
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
-		unsigned long flags;
-
-		save_and_cli(flags);
-		if (head->next != head) {
-			struct sun4c_mmu_entry *entry = head->next;
-			int savectx = sun4c_get_context();
-
-			sun4c_set_context(new_ctx);
-			sun4c_flush_context_hw();
-			do {
-				struct sun4c_mmu_entry *next = entry->next;
-
-				sun4c_user_unmap(entry);
-				free_user_entry(new_ctx, entry);
-
-				entry = next;
-			} while (entry != head);
-			sun4c_set_context(savectx);
-		}
-		restore_flags(flags);
-	}
-}
-
-static void sun4c_flush_tlb_range_hw(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
-		struct sun4c_mmu_entry *entry;
-		unsigned long flags;
-
-		save_and_cli(flags);
-
-		/* See commentary in sun4c_flush_cache_range_*(). */
-		for (entry = head->next;
-		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
-		     entry = entry->next)
-			;
-
-		if ((entry != head) && (entry->vaddr < end)) {
-			int octx = sun4c_get_context();
-
-			sun4c_set_context(new_ctx);
-			do {
-				struct sun4c_mmu_entry *next = entry->next;
-
-				sun4c_flush_segment_hw(entry->vaddr);
-				sun4c_user_unmap(entry);
-				free_user_entry(new_ctx, entry);
-
-				entry = next;
-			} while ((entry != head) && (entry->vaddr < end));
-			sun4c_set_context(octx);
-		}
-		restore_flags(flags);
-	}
-}
-
-static void sun4c_flush_tlb_page_hw(struct vm_area_struct *vma, unsigned long page)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	int new_ctx = mm->context;
-
-	if (new_ctx != NO_CONTEXT) {
-		int savectx = sun4c_get_context();
-		unsigned long flags;
-
-		save_and_cli(flags);
-		sun4c_set_context(new_ctx);
-		page &= PAGE_MASK;
-		sun4c_flush_page_hw(page);
-		sun4c_put_pte(page, 0);
-		sun4c_set_context(savectx);
-		restore_flags(flags);
-	}
-}
-
-static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm)
+static void sun4c_flush_tlb_mm(struct mm_struct *mm)
 {
 	int new_ctx = mm->context;
@@ -1836,7 +1515,7 @@ static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm)
 			int savectx = sun4c_get_context();
 
 			sun4c_set_context(new_ctx);
-			sun4c_flush_context_sw();
+			sun4c_flush_context();
 			do {
 				struct sun4c_mmu_entry *next = entry->next;
@@ -1862,7 +1541,7 @@ static void sun4c_flush_tlb_range_sw(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 		unsigned long flags;
 
 		save_and_cli(flags);
 
-		/* See commentary in sun4c_flush_cache_range_*(). */
+		/* See commentary in sun4c_flush_cache_range(). */
 		for (entry = head->next;
 		     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
 		     entry = entry->next)
@@ -1875,7 +1554,7 @@ static void sun4c_flush_tlb_range_sw(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 			do {
 				struct sun4c_mmu_entry *next = entry->next;
 
-				sun4c_flush_segment_sw(entry->vaddr);
+				sun4c_flush_segment(entry->vaddr);
 				sun4c_user_unmap(entry);
 				free_user_entry(new_ctx, entry);
@@ -1887,7 +1566,7 @@ static void sun4c_flush_tlb_range_sw(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 	}
 }
 
-static void sun4c_flush_tlb_page_sw(struct vm_area_struct *vma, unsigned long page)
+static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int new_ctx = mm->context;
@@ -1899,7 +1578,7 @@ static void sun4c_flush_tlb_page_sw(struct vm_area_struct *vma, unsigned long page)
 		save_and_cli(flags);
 		sun4c_set_context(new_ctx);
 		page &= PAGE_MASK;
-		sun4c_flush_page_sw(page);
+		sun4c_flush_page(page);
 		sun4c_put_pte(page, 0);
 		sun4c_set_context(savectx);
 		restore_flags(flags);
@@ -1923,7 +1602,7 @@ void sun4c_unmapioaddr(unsigned long virt_addr)
 	sun4c_put_pte(virt_addr, 0);
 }
 
-static void sun4c_alloc_context_hw(struct mm_struct *old_mm, struct mm_struct *mm)
+static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
 {
 	struct ctx_list *ctxp;
@@ -1943,92 +1622,35 @@ static void sun4c_alloc_context_hw(struct mm_struct *old_mm, struct mm_struct *mm)
 	ctxp->ctx_mm->context = NO_CONTEXT;
 	ctxp->ctx_mm = mm;
 	mm->context = ctxp->ctx_number;
-	sun4c_demap_context_hw(&sun4c_context_ring[ctxp->ctx_number],
-			       ctxp->ctx_number);
-}
-
-/* Switch the current MM context. */
-static void sun4c_switch_mm_hw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
-{
-	struct ctx_list *ctx;
-	int dirty = 0;
-
-	if (mm->context == NO_CONTEXT) {
-		dirty = 1;
-		sun4c_alloc_context_hw(old_mm, mm);
-	} else {
-		/* Update the LRU ring of contexts. */
-		ctx = ctx_list_pool + mm->context;
-		remove_from_ctx_list(ctx);
-		add_to_used_ctxlist(ctx);
-	}
-	if (dirty || old_mm != mm)
-		sun4c_set_context(mm->context);
-}
-
-static void sun4c_destroy_context_hw(struct mm_struct *mm)
-{
-	struct ctx_list *ctx_old;
-
-	if (mm->context != NO_CONTEXT) {
-		sun4c_demap_context_hw(&sun4c_context_ring[mm->context], mm->context);
-		ctx_old = ctx_list_pool + mm->context;
-		remove_from_ctx_list(ctx_old);
-		add_to_free_ctxlist(ctx_old);
-		mm->context = NO_CONTEXT;
-	}
-}
-
-static void sun4c_alloc_context_sw(struct mm_struct *old_mm, struct mm_struct *mm)
-{
-	struct ctx_list *ctxp;
-
-	ctxp = ctx_free.next;
-	if (ctxp != &ctx_free) {
-		remove_from_ctx_list(ctxp);
-		add_to_used_ctxlist(ctxp);
-		mm->context = ctxp->ctx_number;
-		ctxp->ctx_mm = mm;
-		return;
-	}
-	ctxp = ctx_used.next;
-	if (ctxp->ctx_mm == old_mm)
-		ctxp = ctxp->next;
-	remove_from_ctx_list(ctxp);
-	add_to_used_ctxlist(ctxp);
-	ctxp->ctx_mm->context = NO_CONTEXT;
-	ctxp->ctx_mm = mm;
-	mm->context = ctxp->ctx_number;
-	sun4c_demap_context_sw(&sun4c_context_ring[ctxp->ctx_number],
-			       ctxp->ctx_number);
+	sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
+			    ctxp->ctx_number);
 }
 
 /* Switch the current MM context. */
-static void sun4c_switch_mm_sw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
+static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
 {
 	struct ctx_list *ctx;
 	int dirty = 0;
 
 	if (mm->context == NO_CONTEXT) {
 		dirty = 1;
-		sun4c_alloc_context_sw(old_mm, mm);
+		sun4c_alloc_context(old_mm, mm);
 	} else {
 		/* Update the LRU ring of contexts. */
 		ctx = ctx_list_pool + mm->context;
 		remove_from_ctx_list(ctx);
 		add_to_used_ctxlist(ctx);
 	}
 	if (dirty || old_mm != mm)
 		sun4c_set_context(mm->context);
 }
 
-static void sun4c_destroy_context_sw(struct mm_struct *mm)
+static void sun4c_destroy_context(struct mm_struct *mm)
 {
 	struct ctx_list *ctx_old;
 
 	if (mm->context != NO_CONTEXT) {
-		sun4c_demap_context_sw(&sun4c_context_ring[mm->context], mm->context);
+		sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
 		ctx_old = ctx_list_pool + mm->context;
 		remove_from_ctx_list(ctx_old);
 		add_to_free_ctxlist(ctx_old);
@@ -2095,7 +1717,7 @@ static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
 {
-	*pmdp = (PGD_TABLE | (unsigned long) ptep);
+	*pmdp = __pmd(PGD_TABLE | (unsigned long) ptep);
 }
 
 static int sun4c_pte_present(pte_t pte)
@@ -2178,10 +1800,7 @@ static inline unsigned long sun4c_pmd_page(pmd_t pmd)
 	return (pmd_val(pmd) & PAGE_MASK);
 }
 
-static unsigned long sun4c_pgd_page(pgd_t pgd)
-{
-	return 0;
-}
+static unsigned long sun4c_pgd_page(pgd_t pgd) { return 0; }
 
 /* to find an entry in a page-table-directory */
 static inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
@@ -2275,9 +1894,7 @@ static pmd_t *sun4c_pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
 	return NULL;
 }
 
-static void sun4c_free_pmd_fast(pmd_t * pmd)
-{
-}
+static void sun4c_free_pmd_fast(pmd_t * pmd) { }
 
 static int sun4c_check_pgt_cache(int low, int high)
 {
@@ -2470,37 +2087,31 @@ void __init ld_mmu_sun4c(void)
 	    _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
 
 	/* Functions */
 #ifndef CONFIG_SMP
 	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
 #endif
 	BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
 
 	BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);
 
 	if (sun4c_vacinfo.do_hwflushes) {
-		BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm_hw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_hw, BTFIXUPCALL_NORM);
+		BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_hw, BTFIXUPCALL_NORM);
+		BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_hw, BTFIXUPCALL_NORM);
+		BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_hw, BTFIXUPCALL_NORM);
 	} else {
-		BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm_sw, BTFIXUPCALL_NORM);
-		BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_sw, BTFIXUPCALL_NORM);
+		BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_sw, BTFIXUPCALL_NORM);
+		BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_sw, BTFIXUPCALL_NORM);
+		BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_sw, BTFIXUPCALL_NORM);
 	}
+
+	BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram, BTFIXUPCALL_NORM);
 
 	BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);
 
 	BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP);
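A note on the dispatch change above: instead of testing sun4c_vacinfo.do_hwflushes on every flush, the reworked sun4c.c declares sun4c_flush_page/segment/context as btfixup slots (BTFIXUPDEF_CALL earlier in the file) and binds each slot exactly once in ld_mmu_sun4c() with BTFIXUPSET_CALL. As I understand the sparc32 btfixup machinery, the call sites are patched at boot; a rough user-space analogue of the idea -- pick the implementation once at init instead of branching on every call -- is sketched below. This is only an illustration with made-up names, not the kernel mechanism itself:

#include <stdio.h>

/* One dispatch slot, bound once at init -- loosely analogous to the
 * BTFIXUPDEF_CALL/BTFIXUPSET_CALL pair used above (btfixup additionally
 * patches the call sites in place at boot on sparc32). */
static void (*flush_page)(unsigned long addr);

static void flush_page_hw(unsigned long addr) { printf("hw flush %lx\n", addr); }
static void flush_page_sw(unsigned long addr) { printf("sw flush %lx\n", addr); }

static void flush_init(int do_hwflushes)
{
	/* Decide once, instead of testing do_hwflushes on every flush. */
	flush_page = do_hwflushes ? flush_page_hw : flush_page_sw;
}

int main(void)
{
	flush_init(1);
	flush_page(0x1000);	/* -> "hw flush 1000" */
	return 0;
}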
arch/sparc64/kernel/Makefile
@@ -31,11 +31,6 @@ else
 endif
 endif
 
-#
-# This is just to get the dependencies...
-#
-binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
-
 ifneq ($(NEW_GCC),y)
   CMODEL_CFLAG := -mmedlow
 else
arch/sparc64/kernel/sys_sparc32.c
@@ -2879,16 +2879,19 @@ do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs * regs)
 	bprm.sh_bang = 0;
 	bprm.loader = 0;
 	bprm.exec = 0;
-	if ((bprm.argc = count32(argv, bprm.p / sizeof(u32))) < 0) {
-		allow_write_access(file);
-		fput(file);
-		return bprm.argc;
-	}
-
-	if ((bprm.envc = count32(envp, bprm.p / sizeof(u32))) < 0) {
-		allow_write_access(file);
-		fput(file);
-		return bprm.envc;
-	}
+	bprm.mm = mm_alloc();
+	retval = -ENOMEM;
+	if (!bprm.mm)
+		goto out_file;
+
+	bprm.argc = count32(argv, bprm.p / sizeof(u32));
+	if ((retval = bprm.argc) < 0)
+		goto out_mm;
+
+	bprm.envc = count32(envp, bprm.p / sizeof(u32));
+	if ((retval = bprm.envc) < 0)
+		goto out_mm;
 
 	retval = prepare_binprm(&bprm);
 	if (retval < 0)
@@ -2914,14 +2917,20 @@ do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs * regs)
 out:
 	/* Something went wrong, return the inode and free the argument pages*/
-	allow_write_access(bprm.file);
-	if (bprm.file)
-		fput(bprm.file);
-
-	for (i = 0 ; i < MAX_ARG_PAGES ; i++)
-		if (bprm.page[i])
-			__free_page(bprm.page[i]);
+	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+		struct page * page = bprm.page[i];
+		if (page)
+			__free_page(page);
+	}
+
+out_mm:
+	mmdrop(bprm.mm);
+
+out_file:
+	if (bprm.file) {
+		allow_write_access(bprm.file);
+		fput(bprm.file);
+	}
 
 	return retval;
 }
arch/sparc64/mm/generic.c
@@ -17,20 +17,10 @@
 
 static inline void forget_pte(pte_t page)
 {
-	if (pte_none(page))
-		return;
-	if (pte_present(page)) {
-		unsigned long pfn = pte_pfn(page);
-		struct page *ptpage;
-		if (!pfn_valid(pfn))
-			return;
-		ptpage = pfn_to_page(page);
-		if (PageReserved(ptpage))
-			return;
-		page_cache_release(ptpage);
-		return;
-	}
-	swap_free(pte_to_swp_entry(page));
+	if (!pte_none(page)) {
+		printk("forget_pte: old mapping existed!\n");
+		BUG();
+	}
 }
 
 /* Remap IO memory, the same way as remap_page_range(), but use
drivers/net/bonding.c
@@ -161,6 +161,21 @@
  *     - Remove possibility of calling bond_sethwaddr with NULL slave_dev ptr
  *     - Handle hot swap ethernet interface deregistration events to remove
  *       kernel oops following hot swap of enslaved interface
+ *
+ * 2002/1/2 - Chad N. Tindel <ctindel at ieee dot org>
+ *     - Restore original slave flags at release time.
+ *
+ * 2002/02/18 - Erik Habbinga <erik_habbinga at hp dot com>
+ *     - bond_release(): calling kfree on our_slave after call to
+ *       bond_restore_slave_flags, not before
+ *     - bond_enslave(): saving slave flags into original_flags before
+ *       call to netdev_set_master, so the IFF_SLAVE flag doesn't end
+ *       up in original_flags
+ *
+ * 2002/04/05 - Mark Smith <mark.smith at comdev dot cc> and
+ *              Steve Mead <steve.mead at comdev dot cc>
+ *     - Port Gleb Natapov's multicast support patchs from 2.4.12
+ *       to 2.4.18 adding support for multicast.
  */
 
 #include <linux/config.h>
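The 2002/04/05 entry above summarizes the mechanism added later in this file: the bond keeps its own snapshot of the master's multicast list and pushes only the differences down to every slave. A small self-contained sketch of that delta step, with stand-in types instead of the driver's dev_mc_list/bonding_t (the names here are illustrative only), looks like this:

#include <stdio.h>
#include <string.h>

/* Stand-in for the 2.4 dev_mc_list entries the driver copies around. */
struct mc { unsigned char addr[6]; struct mc *next; };

static struct mc *find(struct mc *x, struct mc *list)
{
	for (; list; list = list->next)			/* cf. bond_mc_list_find_dmi() */
		if (!memcmp(x->addr, list->addr, 6))	/* cf. dmi_same() */
			return list;
	return NULL;
}

/* The delta step of the new set_multicast_list(): push additions and
 * removals down to the slaves instead of overwriting their lists. */
static void sync_lists(struct mc *master, struct mc *mirrored)
{
	struct mc *m;

	for (m = master; m; m = m->next)
		if (!find(m, mirrored))
			printf("add %02x..\n", m->addr[0]);	/* -> bond_mc_add() */
	for (m = mirrored; m; m = m->next)
		if (!find(m, master))
			printf("del %02x..\n", m->addr[0]);	/* -> bond_mc_delete() */
}

int main(void)
{
	struct mc a = { {1, 0}, NULL }, b = { {2, 0}, &a };	/* master holds 2, 1 */
	struct mc c = { {1, 0}, NULL };				/* mirrored holds 1  */

	sync_lists(&b, &c);	/* prints "add 02..", nothing to delete */
	return 0;
}

The real set_multicast_list() further down performs the same two passes over dev_mc_list chains, calling dev_mc_add()/dev_mc_delete() on each slave and then refreshing its snapshot with bond_mc_list_copy().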
@@ -208,11 +223,8 @@
 #define MII_ENDOF_NWAY	0x20
 
 #undef  MII_LINK_READY
-/*#define MII_LINK_READY (MII_LINK_UP | MII_ENDOF_NWAY)*/
 #define MII_LINK_READY (MII_LINK_UP)
 
-#define MAX_BOND_ADDR 256
-
 #ifndef BOND_LINK_ARP_INTERV
 #define BOND_LINK_ARP_INTERV	0
 #endif
@@ -223,7 +235,7 @@ static unsigned long arp_target = 0;
 static u32 my_ip = 0;
 char *arp_target_hw_addr = NULL;
 
-static int max_bonds = MAX_BONDS;
+static int max_bonds = BOND_DEFAULT_MAX_BONDS;
 static int miimon = BOND_LINK_MON_INTERV;
 static int mode = BOND_MODE_ROUNDROBIN;
 static int updelay = 0;
@@ -234,7 +246,7 @@ int bond_cnt;
 static struct bonding *these_bonds = NULL;
 static struct net_device *dev_bonds = NULL;
 
-MODULE_PARM(max_bonds, "1-" __MODULE_STRING(INT_MAX) "i");
+MODULE_PARM(max_bonds, "i");
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
 MODULE_PARM(miimon, "i");
 MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
@@ -260,6 +272,15 @@ static struct net_device_stats *bond_get_stats(struct net_device *dev);
...
@@ -260,6 +272,15 @@ static struct net_device_stats *bond_get_stats(struct net_device *dev);
static
void
bond_mii_monitor
(
struct
net_device
*
dev
);
static
void
bond_mii_monitor
(
struct
net_device
*
dev
);
static
void
bond_arp_monitor
(
struct
net_device
*
dev
);
static
void
bond_arp_monitor
(
struct
net_device
*
dev
);
static
int
bond_event
(
struct
notifier_block
*
this
,
unsigned
long
event
,
void
*
ptr
);
static
int
bond_event
(
struct
notifier_block
*
this
,
unsigned
long
event
,
void
*
ptr
);
static
void
bond_restore_slave_flags
(
slave_t
*
slave
);
static
void
bond_mc_list_destroy
(
struct
bonding
*
bond
);
static
void
bond_mc_add
(
bonding_t
*
bond
,
void
*
addr
,
int
alen
);
static
void
bond_mc_delete
(
bonding_t
*
bond
,
void
*
addr
,
int
alen
);
static
int
bond_mc_list_copy
(
struct
dev_mc_list
*
src
,
struct
bonding
*
dst
,
int
gpf_flag
);
static
inline
int
dmi_same
(
struct
dev_mc_list
*
dmi1
,
struct
dev_mc_list
*
dmi2
);
static
void
bond_set_promiscuity
(
bonding_t
*
bond
,
int
inc
);
static
void
bond_set_allmulti
(
bonding_t
*
bond
,
int
inc
);
static
struct
dev_mc_list
*
bond_mc_list_find_dmi
(
struct
dev_mc_list
*
dmi
,
struct
dev_mc_list
*
mc_list
);
static
void
bond_set_slave_inactive_flags
(
slave_t
*
slave
);
static
void
bond_set_slave_inactive_flags
(
slave_t
*
slave
);
static
void
bond_set_slave_active_flags
(
slave_t
*
slave
);
static
void
bond_set_slave_active_flags
(
slave_t
*
slave
);
static
int
bond_enslave
(
struct
net_device
*
master
,
struct
net_device
*
slave
);
static
int
bond_enslave
(
struct
net_device
*
master
,
struct
net_device
*
slave
);
...
@@ -282,6 +303,11 @@ static int bond_get_info(char *buf, char **start, off_t offset, int length);
...
@@ -282,6 +303,11 @@ static int bond_get_info(char *buf, char **start, off_t offset, int length);
#define IS_UP(dev) ((((dev)->flags & (IFF_UP)) == (IFF_UP)) && \
#define IS_UP(dev) ((((dev)->flags & (IFF_UP)) == (IFF_UP)) && \
(netif_running(dev) && netif_carrier_ok(dev)))
(netif_running(dev) && netif_carrier_ok(dev)))
static
void
bond_restore_slave_flags
(
slave_t
*
slave
)
{
slave
->
dev
->
flags
=
slave
->
original_flags
;
}
static
void
bond_set_slave_inactive_flags
(
slave_t
*
slave
)
static
void
bond_set_slave_inactive_flags
(
slave_t
*
slave
)
{
{
slave
->
state
=
BOND_STATE_BACKUP
;
slave
->
state
=
BOND_STATE_BACKUP
;
...
@@ -431,6 +457,7 @@ static int bond_close(struct net_device *master)
...
@@ -431,6 +457,7 @@ static int bond_close(struct net_device *master)
/* Release the bonded slaves */
/* Release the bonded slaves */
bond_release_all
(
master
);
bond_release_all
(
master
);
bond_mc_list_destroy
(
bond
);
write_unlock_irqrestore
(
&
bond
->
lock
,
flags
);
write_unlock_irqrestore
(
&
bond
->
lock
,
flags
);
...
@@ -438,19 +465,180 @@ static int bond_close(struct net_device *master)
...
@@ -438,19 +465,180 @@ static int bond_close(struct net_device *master)
return
0
;
return
0
;
}
}
static
void
set_multicast_list
(
struct
net_device
*
master
)
/*
* flush all members of flush->mc_list from device dev->mc_list
*/
static
void
bond_mc_list_flush
(
struct
net_device
*
dev
,
struct
net_device
*
flush
)
{
{
struct
dev_mc_list
*
dmi
;
for
(
dmi
=
flush
->
mc_list
;
dmi
!=
NULL
;
dmi
=
dmi
->
next
)
dev_mc_delete
(
dev
,
dmi
->
dmi_addr
,
dmi
->
dmi_addrlen
,
0
);
}
/*
/*
bonding_t *bond = master->priv;
* Totally destroys the mc_list in bond
*/
static
void
bond_mc_list_destroy
(
struct
bonding
*
bond
)
{
struct
dev_mc_list
*
dmi
;
dmi
=
bond
->
mc_list
;
while
(
dmi
)
{
bond
->
mc_list
=
dmi
->
next
;
kfree
(
dmi
);
dmi
=
bond
->
mc_list
;
}
}
/*
* Add a Multicast address to every slave in the bonding group
*/
static
void
bond_mc_add
(
bonding_t
*
bond
,
void
*
addr
,
int
alen
)
{
slave_t
*
slave
;
for
(
slave
=
bond
->
prev
;
slave
!=
(
slave_t
*
)
bond
;
slave
=
slave
->
prev
)
{
dev_mc_add
(
slave
->
dev
,
addr
,
alen
,
0
);
}
}
/*
* Remove a multicast address from every slave in the bonding group
*/
static
void
bond_mc_delete
(
bonding_t
*
bond
,
void
*
addr
,
int
alen
)
{
slave_t
*
slave
;
slave_t
*
slave
;
for (slave = bond->next; slave != (slave_t*)bond; slave = slave->next) {
for
(
slave
=
bond
->
prev
;
slave
!=
(
slave_t
*
)
bond
;
slave
=
slave
->
prev
)
slave->dev->mc_list = master->mc_list;
dev_mc_delete
(
slave
->
dev
,
addr
,
alen
,
0
);
slave->dev->mc_count = master->mc_count;
}
slave->dev->flags = master->flags;
slave->dev->set_multicast_list(slave->dev);
/*
* Copy all the Multicast addresses from src to the bonding device dst
*/
static
int
bond_mc_list_copy
(
struct
dev_mc_list
*
src
,
struct
bonding
*
dst
,
int
gpf_flag
)
{
struct
dev_mc_list
*
dmi
,
*
new_dmi
;
for
(
dmi
=
src
;
dmi
!=
NULL
;
dmi
=
dmi
->
next
)
{
new_dmi
=
kmalloc
(
sizeof
(
struct
dev_mc_list
),
gpf_flag
);
if
(
new_dmi
==
NULL
)
{
return
-
ENOMEM
;
}
new_dmi
->
next
=
dst
->
mc_list
;
dst
->
mc_list
=
new_dmi
;
new_dmi
->
dmi_addrlen
=
dmi
->
dmi_addrlen
;
memcpy
(
new_dmi
->
dmi_addr
,
dmi
->
dmi_addr
,
dmi
->
dmi_addrlen
);
new_dmi
->
dmi_users
=
dmi
->
dmi_users
;
new_dmi
->
dmi_gusers
=
dmi
->
dmi_gusers
;
}
}
return
0
;
}
/*
* Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
*/
static
inline
int
dmi_same
(
struct
dev_mc_list
*
dmi1
,
struct
dev_mc_list
*
dmi2
)
{
return
memcmp
(
dmi1
->
dmi_addr
,
dmi2
->
dmi_addr
,
dmi1
->
dmi_addrlen
)
==
0
&&
dmi1
->
dmi_addrlen
==
dmi2
->
dmi_addrlen
;
}
/*
* Push the promiscuity flag down to all slaves
*/
*/
static
void
bond_set_promiscuity
(
bonding_t
*
bond
,
int
inc
)
{
slave_t
*
slave
;
for
(
slave
=
bond
->
prev
;
slave
!=
(
slave_t
*
)
bond
;
slave
=
slave
->
prev
)
dev_set_promiscuity
(
slave
->
dev
,
inc
);
}
/*
* Push the allmulti flag down to all slaves
*/
static
void
bond_set_allmulti
(
bonding_t
*
bond
,
int
inc
)
{
slave_t
*
slave
;
for
(
slave
=
bond
->
prev
;
slave
!=
(
slave_t
*
)
bond
;
slave
=
slave
->
prev
)
dev_set_allmulti
(
slave
->
dev
,
inc
);
}
/*
* returns dmi entry if found, NULL otherwise
*/
static
struct
dev_mc_list
*
bond_mc_list_find_dmi
(
struct
dev_mc_list
*
dmi
,
struct
dev_mc_list
*
mc_list
)
{
struct
dev_mc_list
*
idmi
;
for
(
idmi
=
mc_list
;
idmi
!=
NULL
;
idmi
=
idmi
->
next
)
{
if
(
dmi_same
(
dmi
,
idmi
))
{
return
idmi
;
}
}
return
NULL
;
}
static
void
set_multicast_list
(
struct
net_device
*
master
)
{
bonding_t
*
bond
=
master
->
priv
;
struct
dev_mc_list
*
dmi
;
unsigned
long
flags
=
0
;
/*
* Lock the private data for the master
*/
write_lock_irqsave
(
&
bond
->
lock
,
flags
);
/*
* Lock the master device so that noone trys to transmit
* while we're changing things
*/
spin_lock_bh
(
&
master
->
xmit_lock
);
/* set promiscuity flag to slaves */
if
(
(
master
->
flags
&
IFF_PROMISC
)
&&
!
(
bond
->
flags
&
IFF_PROMISC
)
)
bond_set_promiscuity
(
bond
,
1
);
if
(
!
(
master
->
flags
&
IFF_PROMISC
)
&&
(
bond
->
flags
&
IFF_PROMISC
)
)
bond_set_promiscuity
(
bond
,
-
1
);
/* set allmulti flag to slaves */
if
(
(
master
->
flags
&
IFF_ALLMULTI
)
&&
!
(
bond
->
flags
&
IFF_ALLMULTI
)
)
bond_set_allmulti
(
bond
,
1
);
if
(
!
(
master
->
flags
&
IFF_ALLMULTI
)
&&
(
bond
->
flags
&
IFF_ALLMULTI
)
)
bond_set_allmulti
(
bond
,
-
1
);
bond
->
flags
=
master
->
flags
;
/* looking for addresses to add to slaves' mc list */
for
(
dmi
=
master
->
mc_list
;
dmi
!=
NULL
;
dmi
=
dmi
->
next
)
{
if
(
bond_mc_list_find_dmi
(
dmi
,
bond
->
mc_list
)
==
NULL
)
bond_mc_add
(
bond
,
dmi
->
dmi_addr
,
dmi
->
dmi_addrlen
);
}
/* looking for addresses to delete from slaves' list */
for
(
dmi
=
bond
->
mc_list
;
dmi
!=
NULL
;
dmi
=
dmi
->
next
)
{
if
(
bond_mc_list_find_dmi
(
dmi
,
master
->
mc_list
)
==
NULL
)
bond_mc_delete
(
bond
,
dmi
->
dmi_addr
,
dmi
->
dmi_addrlen
);
}
/* save master's multicast list */
bond_mc_list_destroy
(
bond
);
bond_mc_list_copy
(
master
->
mc_list
,
bond
,
GFP_KERNEL
);
spin_unlock_bh
(
&
master
->
xmit_lock
);
write_unlock_irqrestore
(
&
bond
->
lock
,
flags
);
}
}
/*
/*
@@ -476,6 +664,7 @@ static int bond_enslave(struct net_device *master_dev,
 	unsigned long flags = 0;
 	int ndx = 0;
 	int err = 0;
+	struct dev_mc_list *dmi;
 
 	if (master_dev == NULL || slave_dev == NULL) {
 		return -ENODEV;
@@ -513,6 +702,8 @@ static int bond_enslave(struct net_device *master_dev,
 	}
 	memset(new_slave, 0, sizeof(slave_t));
 
+	/* save flags before call to netdev_set_master */
+	new_slave->original_flags = slave_dev->flags;
 	err = netdev_set_master(slave_dev, master_dev);
 	if (err) {
@@ -526,10 +717,38 @@ static int bond_enslave(struct net_device *master_dev,
 	new_slave->dev = slave_dev;
 
+	/* set promiscuity level to new slave */
+	if (master_dev->flags & IFF_PROMISC)
+		dev_set_promiscuity(slave_dev, 1);
+
+	/* set allmulti level to new slave */
+	if (master_dev->flags & IFF_ALLMULTI)
+		dev_set_allmulti(slave_dev, 1);
+
+	/* upload master's mc_list to new slave */
+	for (dmi = master_dev->mc_list; dmi != NULL; dmi = dmi->next)
+		dev_mc_add(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+
 	/*
 	 * queue to the end of the slaves list, make the first element its
 	 * successor, the last one its predecessor, and make it the bond's
 	 * predecessor.
+	 *
+	 * Just to clarify, so future bonding driver hackers don't go through
+	 * the same confusion stage I did trying to figure this out, the
+	 * slaves are stored in a double linked circular list, sortof.
+	 * In the ->next direction, the last slave points to the first slave,
+	 * bypassing bond; only the slaves are in the ->next direction.
+	 * In the ->prev direction, however, the first slave points to bond
+	 * and bond points to the last slave.
+	 *
+	 * It looks like a circle with a little bubble hanging off one side
+	 * in the ->prev direction only.
+	 *
+	 * When going through the list once, its best to start at bond->prev
+	 * and go in the ->prev direction, testing for bond.  Doing this
+	 * in the ->next direction doesn't work.  Trust me, I know this now.
+	 * :)  -mts  2002.03.14
 	 */
 	new_slave->prev = bond->prev;
 	new_slave->prev->next = new_slave;
...
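The long comment added above is the authoritative description of the slave list shape. As a quick reference, the traversal idiom it recommends (and which the new multicast helpers earlier in this diff already use) looks like this — a sketch only, reusing the driver's existing bonding_t/slave_t types and a hypothetical per-slave action:

/* Walk every slave exactly once: start at bond->prev and follow ->prev
 * until we are back at the bond itself (the "bubble" described in the
 * comment).  Walking in the ->next direction never reaches bond. */
slave_t *slave;

for (slave = bond->prev; slave != (slave_t *)bond; slave = slave->prev)
    do_something_with(slave);   /* hypothetical per-slave action */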
@@ -838,10 +1057,20 @@ static int bond_release(struct net_device *master, struct net_device *slave)
    } else {
        printk(".\n");
    }
-   kfree(our_slave);

    /* release the slave from its bond */

+   /* flush master's mc_list from slave */
+   bond_mc_list_flush(slave, master);
+
+   /* unset promiscuity level from slave */
+   if (master->flags & IFF_PROMISC)
+       dev_set_promiscuity(slave, -1);
+
+   /* unset allmulti level from slave */
+   if (master->flags & IFF_ALLMULTI)
+       dev_set_allmulti(slave, -1);
+
    netdev_set_master(slave, NULL);

    /* only restore its RUNNING flag if monitoring set it down */
...
@@ -854,6 +1083,9 @@ static int bond_release(struct net_device *master, struct net_device *slave)
        dev_close(slave);
    }

+   bond_restore_slave_flags(our_slave);
+   kfree(our_slave);
+
    if (bond->current_slave == NULL) {
        printk(KERN_INFO
            "%s: now running without any active interface !\n",
...
@@ -1121,7 +1353,7 @@ static void bond_mii_monitor(struct net_device *master)
                master->name, bestslave->dev->name,
                (updelay - bestslave->delay) * miimon);

            bestslave->delay = 0;
            bestslave->link = BOND_LINK_UP;
        }
...
@@ -1192,7 +1424,7 @@ static void bond_arp_monitor(struct net_device *master)
        read_lock(&bond->ptrlock);
        if ((!(slave->link == BOND_LINK_UP))
            && (slave != bond->current_slave)) {
            read_unlock(&bond->ptrlock);
...
@@ -1207,7 +1439,7 @@ static void bond_arp_monitor(struct net_device *master)
            slave->state = BOND_STATE_ACTIVE;
            bond->current_slave = slave;
        }
        if (slave != bond->current_slave) {
            slave->dev->flags |= IFF_NOARP;
        }
        write_unlock(&bond->ptrlock);
...
@@ -1311,7 +1543,7 @@ static void bond_arp_monitor(struct net_device *master)
#define isdigit(c) (c >= '0' && c <= '9')

__inline static int atoi(char **s)
{
    int i = 0;

    while (isdigit(**s))
        i = i * 20 + *((*s)++) - '0';
    return i;
...
@@ -1388,7 +1620,7 @@ my_inet_aton(char *cp, unsigned long *the_addr) {
        goto ret_0;
    }
    if (the_addr != NULL) {
        *the_addr = res.word | htonl(val);
    }
...
@@ -1420,7 +1652,7 @@ static int bond_info_query(struct net_device *master, struct ifbond *info)
    info->miimon = miimon;

    read_lock_irqsave(&bond->lock, flags);
    for (slave = bond->prev; slave != (slave_t *)bond; slave = slave->prev) {
        info->num_slaves++;
    }
    read_unlock_irqrestore(&bond->lock, flags);
...
@@ -1696,7 +1928,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *dev)
    /* if we are sending arp packets, try to at least
       identify our own ip address */
    if ((arp_interval > 0) && (my_ip == 0) &&
        (skb->protocol == __constant_htons(ETH_P_ARP))) {
        char *the_ip = (((char *)skb->data))
                + sizeof(struct ethhdr)
...
@@ -1708,7 +1940,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *dev)
    /* if we are sending arp packets and don't know
       the target hw address, save it so we don't need
       to use a broadcast address */
    if ((arp_interval > 0) && (arp_target_hw_addr == NULL) &&
        (skb->protocol == __constant_htons(ETH_P_IP))) {
        struct ethhdr *eth_hdr =
            (struct ethhdr *)(((char *)skb->data));
...
@@ -1751,7 +1983,7 @@ static struct net_device_stats *bond_get_stats(struct net_device *dev)
    read_lock_irqsave(&bond->lock, flags);

    for (slave = bond->prev; slave != (slave_t *)bond; slave = slave->prev) {
        sstats = slave->dev->get_stats(slave->dev);
        stats->rx_packets += sstats->rx_packets;
...
@@ -1861,7 +2093,7 @@ static int bond_get_info(char *buf, char **start, off_t offset, int length)
static int bond_event(struct notifier_block *this, unsigned long event,
            void *ptr)
{
    struct bonding *this_bond = (struct bonding *)these_bonds;
    struct bonding *last_bond;
    struct net_device *event_dev = (struct net_device *)ptr;
...
@@ -1905,10 +2137,8 @@ static int bond_event(struct notifier_block *this, unsigned long event,
    return NOTIFY_DONE;
}

static struct notifier_block bond_netdev_notifier = {
-   bond_event,
-   NULL,
-   0
+   notifier_call: bond_event,
};

static int __init bond_init(struct net_device *dev)
...
@@ -2038,6 +2268,13 @@ static int __init bonding_init(void)
    /* Find a name for this unit */
    static struct net_device *dev_bond = NULL;

+   if (max_bonds < 1 || max_bonds > INT_MAX) {
+       printk(KERN_WARNING
+              "bonding_init(): max_bonds (%d) not in range %d-%d, "
+              "so it was reset to BOND_DEFAULT_MAX_BONDS (%d)",
+              max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+       max_bonds = BOND_DEFAULT_MAX_BONDS;
+   }
    dev_bond = dev_bonds = kmalloc(max_bonds * sizeof(struct net_device),
                    GFP_KERNEL);
    if (dev_bond == NULL) {
...
include/asm-sparc/btfixup.h
View file @
868f24fc
...
@@ -16,7 +16,22 @@ extern unsigned int ___illegal_use_of_BTFIXUP_SIMM13_in_module(void);
 extern unsigned int ___illegal_use_of_BTFIXUP_SETHI_in_module(void);
 extern unsigned int ___illegal_use_of_BTFIXUP_HALF_in_module(void);
 extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
-#endif
+
+#define BTFIXUP_SIMM13(__name) ___illegal_use_of_BTFIXUP_SIMM13_in_module()
+#define BTFIXUP_HALF(__name) ___illegal_use_of_BTFIXUP_HALF_in_module()
+#define BTFIXUP_SETHI(__name) ___illegal_use_of_BTFIXUP_SETHI_in_module()
+#define BTFIXUP_INT(__name) ___illegal_use_of_BTFIXUP_INT_in_module()
+#define BTFIXUP_BLACKBOX(__name) ___illegal_use_of_BTFIXUP_BLACKBOX_in_module
+#else
+#define BTFIXUP_SIMM13(__name) ___sf_##__name()
+#define BTFIXUP_HALF(__name) ___af_##__name()
+#define BTFIXUP_SETHI(__name) ___hf_##__name()
+#define BTFIXUP_INT(__name) ((unsigned int)&___i_##__name)
+/* This must be written in assembly and present in a sethi */
+#define BTFIXUP_BLACKBOX(__name) ___b_##__name
+#endif /* MODULE */

 /* Fixup call xx */
...
@@ -30,12 +45,6 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 #define BTFIXUPDEF_BLACKBOX(__name) \
    extern unsigned ___bs_##__name[2];
-#ifdef MODULE
-#define BTFIXUP_BLACKBOX(__name) ___illegal_use_of_BTFIXUP_BLACKBOX_in_module
-#else
-/* This must be written in assembly and present in a sethi */
-#define BTFIXUP_BLACKBOX(__name) ___b_##__name
-#endif

 /* Put bottom 13bits into some register variable */
...
@@ -55,11 +64,6 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
    __asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
    return ret; \
 }
-#ifdef MODULE
-#define BTFIXUP_SIMM13(__name) ___illegal_use_of_BTFIXUP_SIMM13_in_module()
-#else
-#define BTFIXUP_SIMM13(__name) ___sf_##__name()
-#endif

 /* Put either bottom 13 bits, or upper 22 bits into some register variable
  * (depending on the value, this will lead into sethi FIX, reg; or
...
@@ -82,11 +86,6 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
    __asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
    return ret; \
 }
-#ifdef MODULE
-#define BTFIXUP_HALF(__name) ___illegal_use_of_BTFIXUP_HALF_in_module()
-#else
-#define BTFIXUP_HALF(__name) ___af_##__name()
-#endif

 /* Put upper 22 bits into some register variable */
...
@@ -107,22 +106,12 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
        "=r"(ret)); \
    return ret; \
 }
-#ifdef MODULE
-#define BTFIXUP_SETHI(__name) ___illegal_use_of_BTFIXUP_SETHI_in_module()
-#else
-#define BTFIXUP_SETHI(__name) ___hf_##__name()
-#endif

 /* Put a full 32bit integer into some register variable */
 #define BTFIXUPDEF_INT(__name) \
    extern unsigned char ___i_##__name; \
    extern unsigned ___is_##__name[2];
-#ifdef MODULE
-#define BTFIXUP_INT(__name) ___illegal_use_of_BTFIXUP_INT_in_module()
-#else
-#define BTFIXUP_INT(__name) ((unsigned int)&___i_##__name)
-#endif

 #define BTFIXUPCALL_NORM  0x00000000  /* Always call */
 #define BTFIXUPCALL_NOP   0x01000000  /* Possibly optimize to nop */
...
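The btfixup.h hunks above only consolidate the scattered #ifdef MODULE blocks into one place next to the extern declarations; the underlying trick is unchanged. A minimal sketch of that trick outside the kernel (the names here are invented, only the pattern matches btfixup.h):

/* In a module build the macro expands to a call to a function that is
 * declared but never defined anywhere, so a module that wrongly uses a
 * boot-time-fixed value fails at link time instead of running with a
 * bogus constant. */
extern unsigned int ___illegal_use_of_EXAMPLE_in_module(void);

#ifdef MODULE
#define EXAMPLE_VALUE() ___illegal_use_of_EXAMPLE_in_module()
#else
#define EXAMPLE_VALUE() 42  /* patched at boot time in the real header */
#endif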
include/asm-sparc/page.h
View file @
868f24fc
...
@@ -54,8 +54,8 @@
 #define clear_page(page)    memset((void *)(page), 0, PAGE_SIZE)
 #define copy_page(to,from)  memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-#define clear_user_page(page, vaddr)            clear_page(page)
-#define copy_user_page(to, from, vaddr)         copy_page(to, from)
+#define clear_user_page(addr, vaddr, page)      clear_page(addr)
+#define copy_user_page(to, from, vaddr, page)   copy_page(to, from)

 /* The following structure is used to hold the physical
  * memory configuration of the machine.  This is filled in
...
include/asm-sparc/ultra.h
deleted
100644 → 0
View file @
a9274e96
/* $Id: ultra.h,v 1.2 1995/11/25 02:33:10 davem Exp $
* ultra.h: Definitions and defines for the TI V9 UltraSparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_ULTRA_H
#define _SPARC_ULTRA_H
/* Spitfire MMU control register:
*
* ----------------------------------------------------------
* | | IMPL | VERS | | MID | |
* ----------------------------------------------------------
* 64 31-28 27-24 23-22 21-17 16 0
*
* IMPL: Implementation of this Spitfire.
* VERS: Version of this Spitfire.
* MID: Module ID of this processor.
*/
#define SPITFIRE_MIDMASK 0x00000000003e0000
/* Spitfire Load Store Unit control register:
*
* ---------------------------------------------------------------------
* | RSV | PWR | PWW | VWR | VWW | RSV | PMASK | DME | IME | DCE | ICE |
* ---------------------------------------------------------------------
* 63-25 24 23 22 21 20 19-4 3 2 1 0
*
* PWR: Physical Watchpoint Read enable: 0=off 1=on
* PWW: Physical Watchpoint Write enable: 0=off 1=on
* VWR: Virtual Watchpoint Read enable: 0=off 1=on
* VWW: Virtual Watchpoint Write enable: 0=off 1=on
* PMASK: Parity MASK ???
* DME: Data MMU Enable: 0=off 1=on
* IME: Instruction MMU Enable: 0=off 1=on
* DCE: Data Cache Enable: 0=off 1=on
* ICE: Instruction Cache Enable: 0=off 1=on
*/
#define SPITFIRE_LSU_PWR 0x01000000
#define SPITFIRE_LSU_PWW 0x00800000
#define SPITFIRE_LSU_VWR 0x00400000
#define SPITFIRE_LSU_VWW 0x00200000
#define SPITFIRE_LSU_PMASK 0x000ffff0
#define SPITFIRE_LSU_DME 0x00000008
#define SPITFIRE_LSU_IME 0x00000004
#define SPITFIRE_LSU_DCE 0x00000002
#define SPITFIRE_LSU_ICE 0x00000001
#endif /* !(_SPARC_ULTRA_H) */
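The bit-field comments in this deleted header still document the Spitfire LSU control register layout. Purely as an illustration of how those masks were meant to be combined with a raw register value (a sketch, not code from the tree — in real code "lsu" would come from a V9 ASI read):

static int spitfire_caches_enabled(unsigned long lsu)
{
    /* DCE and ICE are the data/instruction cache enable bits */
    return (lsu & SPITFIRE_LSU_DCE) && (lsu & SPITFIRE_LSU_ICE);
}

static int spitfire_mmus_enabled(unsigned long lsu)
{
    /* DME and IME are the data/instruction MMU enable bits */
    return (lsu & SPITFIRE_LSU_DME) && (lsu & SPITFIRE_LSU_IME);
}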
include/asm-sparc/vac-ops.h
View file @
868f24fc
...
@@ -107,8 +107,6 @@ struct sun4c_vac_props {
 extern struct sun4c_vac_props sun4c_vacinfo;

-extern void sun4c_flush_all(void);
-
 /* sun4c_enable_vac() enables the sun4c virtual address cache. */
 extern __inline__ void sun4c_enable_vac(void)
 {
...
include/asm-sparc64/page.h
View file @
868f24fc
...
@@ -35,8 +35,9 @@ extern void do_BUG(const char *file, int line);
 extern void _clear_page(void *page);
 #define clear_page(X)  _clear_page((void *)(X))
-extern void clear_user_page(void *page, unsigned long vaddr);
-extern void copy_user_page(void *to, void *from, unsigned long vaddr);
+struct page;
+extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);

 /* GROSS, defining this makes gcc pass these types as aggregates,
  * and thus on the stack, turn this crap off... -DaveM
...
include/linux/if_bonding.h
View file @
868f24fc
...
@@ -51,7 +51,7 @@
 #define BOND_STATE_ACTIVE  0  /* link is active */
 #define BOND_STATE_BACKUP  1  /* link is backup */

-#define MAX_BONDS  1  /* Maximum number of devices to support */
+#define BOND_DEFAULT_MAX_BONDS  1  /* Default maximum number of devices to support */

 typedef struct ifbond {
    __s32 bond_mode;
...
@@ -76,6 +76,7 @@ typedef struct slave {
    short  delay;
    char   link;    /* one of BOND_LINK_XXXX */
    char   state;   /* one of BOND_STATE_XXXX */
+   unsigned short original_flags;
    u32 link_failure_count;
 } slave_t;
...
@@ -104,6 +105,8 @@ typedef struct bonding {
 #endif /* CONFIG_PROC_FS */
    struct bonding *next_bond;
    struct net_device *device;
+   struct dev_mc_list *mc_list;
+   unsigned short flags;
 } bonding_t;

 #endif /* __KERNEL__ */
...
include/net/pkt_sched.h
View file @
868f24fc
...
@@ -8,6 +8,7 @@
 #define PSCHED_CLOCK_SOURCE  PSCHED_JIFFIES

 #include <linux/config.h>
+#include <linux/types.h>
 #include <linux/pkt_sched.h>
 #include <net/pkt_cls.h>
...
@@ -221,7 +222,7 @@ extern psched_time_t psched_time_base;
 #define PSCHED_EXPORTLIST_2

-#if ~0UL == 0xFFFFFFFF
+#if BITS_PER_LONG <= 32
 #define PSCHED_WATCHER unsigned long
...
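The old and new guards in the pkt_sched.h hunk select the same configurations: on a machine where long is 32 bits, ~0UL equals 0xFFFFFFFF and BITS_PER_LONG is 32. The new spelling just needs BITS_PER_LONG in scope, which is what the added <linux/types.h> include provides. A tiny illustrative snippet (not from the tree) showing the two tests side by side:

#include <linux/types.h>   /* BITS_PER_LONG comes in via the arch types headers */

/* Both of these are true exactly on 32-bit-long configurations. */
#if ~0UL == 0xFFFFFFFF
#define LONG_IS_32BIT_OLD_STYLE 1
#endif

#if BITS_PER_LONG <= 32
#define LONG_IS_32BIT_NEW_STYLE 1
#endif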
net/core/dev.c
View file @
868f24fc
...
@@ -20,17 +20,18 @@
 *  Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *  Changes:
 *      D.J. Barrow     :   Fixed bug where dev->refcnt gets set
 *                          to 2 if register_netdev gets called
 *                          before net_dev_init & also removed a
 *                          few lines of code in the process.
 *      Alan Cox        :   device private ioctl copies fields back.
 *      Alan Cox        :   Transmit queue code does relevant
 *                          stunts to keep the queue safe.
 *      Alan Cox        :   Fixed double lock.
 *      Alan Cox        :   Fixed promisc NULL pointer trap
 *      ????????        :   Support the full private ioctl range
 *      Alan Cox        :   Moved ioctl permission check into
 *                          drivers
 *      Tim Kordas      :   SIOCADDMULTI/SIOCDELMULTI
 *      Alan Cox        :   100 backlog just doesn't cut it when
 *                          you start doing multicast video 8)
...
@@ -38,16 +39,19 @@
 *      Alan Cox        :   Fix ETH_P_ALL echoback lengths.
 *      Alan Cox        :   Took out transmit every packet pass
 *                          Saved a few bytes in the ioctl handler
 *      Alan Cox        :   Network driver sets packet type before
 *                          calling netif_rx. Saves a function
 *                          call a packet.
 *      Alan Cox        :   Hashed net_bh()
 *      Richard Kooijman:   Timestamp fixes.
 *      Alan Cox        :   Wrong field in SIOCGIFDSTADDR
 *      Alan Cox        :   Device lock protection.
 *      Alan Cox        :   Fixed nasty side effect of device close
 *                          changes.
 *      Rudi Cilibrasi  :   Pass the right thing to
 *                          set_mac_address()
 *      Dave Miller     :   32bit quantity for the device lock to
 *                          make it work out on a Sparc.
 *      Bjorn Ekwall    :   Added KERNELD hack.
 *      Alan Cox        :   Cleaned up the backlog initialise.
 *      Craig Metz      :   SIOCGIFCONF fix if space for under
...
@@ -62,7 +66,8 @@
 *                          the backlog queue.
 *      Paul Rusty Russell  :   SIOCSIFNAME
 *      Pekka Riikonen      :   Netdev boot-time settings code
 *      Andrew Morton       :   Make unregister_netdevice wait
 *                              indefinitely on dev->refcnt
 *      J Hadi Salim        :   - Backlog queue sampling
 *                              - netif_rx() feedback
 */
...
@@ -163,7 +168,7 @@ const char *if_port_text[] = {
 */

static struct packet_type *ptype_base[16];      /* 16 way hashed list */
-static struct packet_type *ptype_all = NULL;    /* Taps */
+static struct packet_type *ptype_all;           /* Taps */

#ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy);
...
@@ -180,7 +185,7 @@ static int net_run_sbin_hotplug(struct net_device *dev, char *action);
 *  Our notifier list
 */

-static struct notifier_block *netdev_chain = NULL;
+static struct notifier_block *netdev_chain;

/*
 *  Device drivers call our routines to queue packets here. We empty the
...
@@ -194,17 +199,17 @@ int netdev_fastroute_obstacles;
#endif

/*******************************************************************************

        Protocol management and registration routines

*******************************************************************************/

/*
 *  For efficiency
 */

-int netdev_nit = 0;
+int netdev_nit;

/*
 *  Add a protocol ID to the list. Now that the input handler is
...
@@ -239,17 +244,17 @@ void dev_add_pack(struct packet_type *pt)
#ifdef CONFIG_NET_FASTROUTE
    /* Hack to detect packet socket */
-   if ((pt->data) && ((int)(pt->data) != 1)) {
+   if (pt->data && (long)(pt->data) != 1) {
        netdev_fastroute_obstacles++;
        dev_clear_fastroute(pt->dev);
    }
#endif
    if (pt->type == htons(ETH_P_ALL)) {
        netdev_nit++;
        pt->next = ptype_all;
        ptype_all = pt;
    } else {
        hash = ntohs(pt->type) & 15;
        pt->next = ptype_base[hash];
        ptype_base[hash] = pt;
    }
...
@@ -266,7 +271,6 @@ void dev_add_pack(struct packet_type *pt)
 *  from the kernel lists and can be freed or reused once this function
 *  returns.
 */
void dev_remove_pack(struct packet_type *pt)
{
    struct packet_type **pt1;
...
@@ -275,24 +279,23 @@ void dev_remove_pack(struct packet_type *pt)
    if (pt->type == htons(ETH_P_ALL)) {
        netdev_nit--;
-       pt1=&ptype_all;
-   } else {
-       pt1=&ptype_base[ntohs(pt->type)&15];
-   }
+       pt1 = &ptype_all;
+   } else
+       pt1 = &ptype_base[ntohs(pt->type) & 15];

-   for (; (*pt1) != NULL; pt1 = &((*pt1)->next)) {
-       if (pt == (*pt1)) {
+   for (; *pt1; pt1 = &((*pt1)->next)) {
+       if (pt == *pt1) {
            *pt1 = pt->next;
#ifdef CONFIG_NET_FASTROUTE
            if (pt->data)
                netdev_fastroute_obstacles--;
#endif
-           br_write_unlock_bh(BR_NETPROTO_LOCK);
-           return;
+           goto out;
        }
    }

-   br_write_unlock_bh(BR_NETPROTO_LOCK);
    printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+out:
+   br_write_unlock_bh(BR_NETPROTO_LOCK);
}

/******************************************************************************
...
@@ -328,10 +331,7 @@ int netdev_boot_setup_add(char *name, struct ifmap *map)
        }
    }

-   if (i >= NETDEV_BOOT_SETUP_MAX)
-       return 0;
-   return 1;
+   return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
...
@@ -345,10 +345,9 @@ int netdev_boot_setup_add(char *name, struct ifmap *map)
 */
int netdev_boot_setup_check(struct net_device *dev)
{
-   struct netdev_boot_setup *s;
+   struct netdev_boot_setup *s = dev_boot_setup;
    int i;

-   s = dev_boot_setup;
    for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
        if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
            !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
...
@@ -391,11 +390,11 @@ int __init netdev_boot_setup(char *str)
__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

            Device Interface Subroutines

*******************************************************************************/

/**
 *  __dev_get_by_name - find a device by its name
...
@@ -408,16 +407,14 @@ __setup("netdev=", netdev_boot_setup);
 *  careful with locks.
 */

struct net_device *__dev_get_by_name(const char *name)
{
    struct net_device *dev;

-   for (dev = dev_base; dev != NULL; dev = dev->next) {
-       if (strncmp(dev->name, name, IFNAMSIZ) == 0)
-           return dev;
-   }
-   return NULL;
+   for (dev = dev_base; dev; dev = dev->next)
+       if (!strncmp(dev->name, name, IFNAMSIZ))
+           break;
+   return dev;
}

/**
...
@@ -462,7 +459,6 @@ struct net_device *dev_get_by_name(const char *name)
 *  This function primarily exists for back compatibility with older
 *  drivers.
 */
int dev_get(const char *name)
{
    struct net_device *dev;
...
@@ -484,15 +480,14 @@ int dev_get(const char *name)
 *  or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(int ifindex)
{
    struct net_device *dev;

-   for (dev = dev_base; dev != NULL; dev = dev->next) {
-       if (dev->ifindex == ifindex)
-           return dev;
-   }
-   return NULL;
+   for (dev = dev_base; dev; dev = dev->next)
+       if (dev->ifindex == ifindex)
+           break;
+   return dev;
}
...
@@ -506,7 +501,7 @@ struct net_device * __dev_get_by_index(int ifindex)
 *  dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(int ifindex)
{
    struct net_device *dev;
...
@@ -538,12 +533,11 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
    ASSERT_RTNL();

-   for (dev = dev_base; dev != NULL; dev = dev->next) {
-       if (dev->type == type &&
-           memcmp(dev->dev_addr, ha, dev->addr_len) == 0)
-           return dev;
-   }
-   return NULL;
+   for (dev = dev_base; dev; dev = dev->next)
+       if (dev->type == type &&
+           !memcmp(dev->dev_addr, ha, dev->addr_len))
+           break;
+   return dev;
}

/**
...
@@ -570,15 +564,15 @@ int dev_alloc_name(struct net_device *dev, const char *name)
     *  characters, or no "%" characters at all.
     */
    p = strchr(name, '%');
    if (p && (p[1] != 'd' || strchr(p + 2, '%')))
        return -EINVAL;

    /*
     *  If you need over 100 please also fix the algorithm...
     */
    for (i = 0; i < 100; i++) {
        snprintf(buf, sizeof(buf), name, i);
-       if (__dev_get_by_name(buf) == NULL) {
+       if (!__dev_get_by_name(buf)) {
            strcpy(dev->name, buf);
            return i;
        }
...
@@ -604,16 +598,17 @@ int dev_alloc_name(struct net_device *dev, const char *name)
struct net_device *dev_alloc(const char *name, int *err)
{
-   struct net_device *dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
-   if (dev == NULL) {
-       *err = -ENOBUFS;
-       return NULL;
-   }
-   memset(dev, 0, sizeof(struct net_device));
-   *err = dev_alloc_name(dev, name);
-   if (*err < 0) {
-       kfree(dev);
-       return NULL;
+   struct net_device *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+   if (!dev)
+       *err = -ENOBUFS;
+   else {
+       memset(dev, 0, sizeof(*dev));
+       *err = dev_alloc_name(dev, name);
+       if (*err < 0) {
+           kfree(dev);
+           dev = NULL;
+       }
    }
    return dev;
}
...
@@ -626,10 +621,9 @@ struct net_device *dev_alloc(const char *name, int *err)
 *  the notifier chains for netdev_chain and sends a NEWLINK message
 *  to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
    if (dev->flags & IFF_UP) {
        notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
        rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
    }
}
...
@@ -661,7 +655,8 @@ extern inline void dev_load(const char *unused){;}
static int default_rebuild_header(struct sk_buff *skb)
{
    printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
           skb->dev ? skb->dev->name : "NULL!!!");
    kfree_skb(skb);
    return 1;
}
...
@@ -678,7 +673,6 @@ static int default_rebuild_header(struct sk_buff *skb)
 *  Calling this function on an active interface is a nop. On a failure
 *  a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
    int ret = 0;
...
@@ -687,7 +681,7 @@ int dev_open(struct net_device *dev)
     *  Is it already up?
     */
    if (dev->flags & IFF_UP)
        return 0;

    /*
...
@@ -702,7 +696,7 @@ int dev_open(struct net_device *dev)
    if (try_inc_mod_count(dev->owner)) {
        if (dev->open) {
            ret = dev->open(dev);
-           if (ret != 0 && dev->owner)
+           if (ret && dev->owner)
                __MOD_DEC_USE_COUNT(dev->owner);
        }
    } else {
...
@@ -713,8 +707,7 @@ int dev_open(struct net_device *dev)
     *  If it went open OK then:
     */
-   if (ret == 0)
-   {
+   if (!ret) {
        /*
         *  Set the flags.
         */
...
@@ -737,7 +730,7 @@ int dev_open(struct net_device *dev)
        notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
    }
-   return(ret);
+   return ret;
}

#ifdef CONFIG_NET_FASTROUTE
...
@@ -747,7 +740,7 @@ static void dev_do_clear_fastroute(struct net_device *dev)
    if (dev->accept_fastpath) {
        int i;

        for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
            struct dst_entry *dst;

            write_lock_irq(&dev->fastpath_lock);
...
@@ -782,10 +775,9 @@ void dev_clear_fastroute(struct net_device *dev)
 *  is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *  chain.
 */
int dev_close(struct net_device *dev)
{
    if (!(dev->flags & IFF_UP))
        return 0;

    /*
...
@@ -818,7 +810,6 @@ int dev_close(struct net_device *dev)
     *  We allow it to be called even after a DETACH hot-plug
     *  event.
     */
    if (dev->stop)
        dev->stop(dev);
...
@@ -842,7 +833,7 @@ int dev_close(struct net_device *dev)
    if (dev->owner)
        __MOD_DEC_USE_COUNT(dev->owner);

-   return(0);
+   return 0;
}
...
@@ -878,7 +869,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
int unregister_netdevice_notifier(struct notifier_block *nb)
{
    return notifier_chain_unregister(&netdev_chain, nb);
}

/*
...
@@ -892,16 +883,14 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
        do_gettimeofday(&skb->stamp);

    br_read_lock(BR_NETPROTO_LOCK);
-   for (ptype = ptype_all; ptype != NULL; ptype = ptype->next)
-   {
+   for (ptype = ptype_all; ptype; ptype = ptype->next) {
        /* Never send packets back to the socket
         * they originated from - MvS (miquels@drinkel.ow.org)
         */
        if ((ptype->dev == dev || !ptype->dev) &&
-           ((struct sock *)ptype->data != skb->sk))
-       {
-           struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
-           if (!skb2)
+           (struct sock *)ptype->data != skb->sk) {
+           struct sk_buff *skb2;
+           if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
                break;

            /* skb->nh should be correctly
...
@@ -910,9 +899,12 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
             */
            skb2->mac.raw = skb2->data;

            if (skb2->nh.raw < skb2->data ||
                skb2->nh.raw > skb2->tail) {
                if (net_ratelimit())
-                   printk(KERN_DEBUG "protocol %04x is buggy, dev %s\n",
-                          skb2->protocol, dev->name);
+                   printk(KERN_DEBUG "protocol %04x is "
+                          "buggy, dev %s\n",
+                          skb2->protocol, dev->name);
                skb2->nh.raw = skb2->data;
            }
...
@@ -928,12 +920,11 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 *    If it failed by some reason, ignore and send skb with wrong
 *    checksum.
 */
struct sk_buff *skb_checksum_help(struct sk_buff *skb)
{
-   int offset;
    unsigned int csum;
+   int offset = skb->h.raw - skb->data;

-   offset = skb->h.raw - skb->data;
    if (offset > (int)skb->len)
        BUG();
    csum = skb_checksum(skb, offset, skb->len - offset, 0);
...
@@ -941,7 +932,7 @@ struct sk_buff * skb_checksum_help(struct sk_buff *skb)
    offset = skb->tail - skb->h.raw;
    if (offset <= 0)
        BUG();
    if (skb->csum + 2 > offset)
        BUG();

    *(u16 *)(skb->h.raw + skb->csum) = csum_fold(csum);
...
@@ -955,15 +946,14 @@ struct sk_buff * skb_checksum_help(struct sk_buff *skb)
 *     2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
    int i;

    if (dev->features & NETIF_F_HIGHDMA)
        return 0;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
        if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
            return 1;
...
@@ -978,8 +968,8 @@ illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 *  @skb: buffer to transmit
 *
 *  Queue a buffer for transmission to a network device. The caller must
 *  have set the device and priority and built the buffer before calling
 *  this function. The function can be called from an interrupt.
 *
 *  A negative errno code is returned on a failure. A success does not
 *  guarantee the frame will be transmitted as it may be dropped due
...
@@ -990,60 +980,59 @@ int dev_queue_xmit(struct sk_buff *skb)
{
    struct net_device *dev = skb->dev;
    struct Qdisc *q;
+   int rc = -ENOMEM;

    if (skb_shinfo(skb)->frag_list &&
        !(dev->features & NETIF_F_FRAGLIST) &&
-       skb_linearize(skb, GFP_ATOMIC) != 0) {
-       kfree_skb(skb);
-       return -ENOMEM;
-   }
+       skb_linearize(skb, GFP_ATOMIC))
+       goto out_kfree_skb;

    /* Fragmented skb is linearized if device does not support SG,
     * or if at least one of fragments is in highmem and device
     * does not support DMA from it.
     */
    if (skb_shinfo(skb)->nr_frags &&
        (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-       skb_linearize(skb, GFP_ATOMIC) != 0) {
-       kfree_skb(skb);
-       return -ENOMEM;
-   }
+       skb_linearize(skb, GFP_ATOMIC))
+       goto out_kfree_skb;

    /* If packet is not checksummed and device does not support
     * checksumming for this protocol, complete checksumming here.
     */
    if (skb->ip_summed == CHECKSUM_HW &&
        (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
         (!(dev->features & NETIF_F_IP_CSUM) ||
          skb->protocol != htons(ETH_P_IP)))) {
        if ((skb = skb_checksum_help(skb)) == NULL)
-           return -ENOMEM;
+           goto out;
    }

    /* Grab device queue */
    spin_lock_bh(&dev->queue_lock);
    q = dev->qdisc;
    if (q->enqueue) {
-       int ret = q->enqueue(skb, q);
+       rc = q->enqueue(skb, q);

        qdisc_run(dev);

        spin_unlock_bh(&dev->queue_lock);
-       return ret == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : ret;
+       rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
+       goto out;
    }

    /* The device has no queue. Common case for software devices:
       loopback, all the sorts of tunnels...

       Really, it is unlikely that xmit_lock protection is necessary here.
       (f.e. loopback and IP tunnels are clean ignoring statistics
       counters.)
       However, it is possible, that they rely on protection
       made by us here.

       Check this and shot the lock. It is not prone from deadlocks.
       Either shot noqueue qdisc, it is even simpler 8)
     */
    if (dev->flags & IFF_UP) {
        int cpu = smp_processor_id();

        if (dev->xmit_lock_owner != cpu) {
...
@@ -1059,30 +1048,36 @@ int dev_queue_xmit(struct sk_buff *skb)
            if (!netif_queue_stopped(dev)) {
                if (netdev_nit)
                    dev_queue_xmit_nit(skb, dev);

-               if (dev->hard_start_xmit(skb, dev) == 0) {
+               rc = 0;
+               if (!dev->hard_start_xmit(skb, dev)) {
                    dev->xmit_lock_owner = -1;
                    spin_unlock_bh(&dev->xmit_lock);
-                   return 0;
+                   goto out;
                }
            }
            dev->xmit_lock_owner = -1;
            spin_unlock_bh(&dev->xmit_lock);
            if (net_ratelimit())
-               printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name);
-           kfree_skb(skb);
-           return -ENETDOWN;
+               printk(KERN_DEBUG "Virtual device %s asks to "
+                      "queue packet!\n", dev->name);
+           goto out_enetdown;
        } else {
-           /* Recursion is detected! It is possible, unfortunately */
+           /* Recursion is detected! It is possible,
+            * unfortunately */
            if (net_ratelimit())
-               printk(KERN_DEBUG "Dead loop on virtual device %s, fix it urgently!\n", dev->name);
+               printk(KERN_DEBUG "Dead loop on virtual device "
+                      "%s, fix it urgently!\n", dev->name);
        }
    }
    spin_unlock_bh(&dev->queue_lock);
+out_enetdown:
+   rc = -ENETDOWN;
+out_kfree_skb:
    kfree_skb(skb);
-   return -ENETDOWN;
+out:
+   return rc;
}
...
@@ -1107,7 +1102,7 @@ struct netif_rx_stats netdev_rx_stat[NR_CPUS];
#ifdef CONFIG_NET_HW_FLOWCONTROL
atomic_t netdev_dropping = ATOMIC_INIT(0);
static unsigned long netdev_fc_mask = 1;
-unsigned long netdev_fc_xoff = 0;
+unsigned long netdev_fc_xoff;
spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;

static struct
...
@@ -1116,7 +1111,8 @@ static struct
    struct net_device *dev;
} netdev_fc_slots[BITS_PER_LONG];

int netdev_register_fc(struct net_device *dev,
               void (*stimul)(struct net_device *dev))
{
    int bit = 0;
    unsigned long flags;
...
@@ -1156,7 +1152,7 @@ static void netdev_wakeup(void)
    netdev_fc_xoff = 0;
    while (xoff) {
        int i = ffz(~xoff);
        xoff &= ~(1 << i);
        netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
    }
    spin_unlock(&netdev_fc_lock);
...
@@ -1172,7 +1168,7 @@ static void get_sample_stats(int cpu)
    int blog = softnet_data[cpu].input_pkt_queue.qlen;
    int avg_blog = softnet_data[cpu].avg_blog;

    avg_blog = (avg_blog >> 1) + (blog >> 1);

    if (avg_blog > mod_cong) {
        /* Above moderate congestion levels. */
...
@@ -1229,7 +1225,6 @@ static void sample_queue(unsigned long dummy)
 *  NET_RX_CN_HIGH   (high congestion)
 *  NET_RX_DROP      (packet was dropped)
 *
 */
int netif_rx(struct sk_buff *skb)
...
@@ -1238,7 +1233,7 @@ int netif_rx(struct sk_buff *skb)
    struct softnet_data *queue;
    unsigned long flags;

-   if (skb->stamp.tv_sec == 0)
+   if (!skb->stamp.tv_sec)
        do_gettimeofday(&skb->stamp);

    /* The code is rearranged so that the path is the most
...
@@ -1256,7 +1251,7 @@ int netif_rx(struct sk_buff *skb)
enqueue:
        dev_hold(skb->dev);
        __skb_queue_tail(&queue->input_pkt_queue, skb);
        local_irq_restore(flags);
#ifndef OFFLINE_SAMPLE
        get_sample_stats(this_cpu);
...
@@ -1276,7 +1271,7 @@ int netif_rx(struct sk_buff *skb)
        goto enqueue;
    }

-   if (queue->throttle == 0) {
+   if (!queue->throttle) {
        queue->throttle = 1;
        netdev_rx_stat[this_cpu].throttled++;
#ifdef CONFIG_NET_HW_FLOWCONTROL
...
@@ -1295,21 +1290,19 @@ int netif_rx(struct sk_buff *skb)
/* Deliver skb to an old protocol, which is not threaded well
   or which do not understand shared skbs.
 */
static int deliver_to_old_ones(struct packet_type *pt,
                   struct sk_buff *skb, int last)
{
    static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED;
    int ret = NET_RX_DROP;

    if (!last) {
        skb = skb_clone(skb, GFP_ATOMIC);
-       if (skb == NULL)
-           return ret;
-   }
-   if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC) != 0) {
-       kfree_skb(skb);
-       return ret;
+       if (!skb)
+           goto out;
    }
+   if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC))
+       goto out_kfree;

    /* The assumption (correct one) is that old protocols
       did not depened on BHs different of NET_BH and TIMER_BH.
...
@@ -1325,7 +1318,11 @@ static int deliver_to_old_ones(struct packet_type *pt, struct sk_buff *skb, int
    tasklet_hi_enable(bh_task_vec + TIMER_BH);
    spin_unlock(&net_bh_lock);
+out:
    return ret;
+
+out_kfree:
+   kfree_skb(skb);
+   goto out;
}

static __inline__ void skb_bond(struct sk_buff *skb)
...
@@ -1348,11 +1345,11 @@ static void net_tx_action(struct softirq_action *h)
        softnet_data[cpu].completion_queue = NULL;
        local_irq_enable();

-       while (clist != NULL) {
+       while (clist) {
            struct sk_buff *skb = clist;
            clist = clist->next;

-           BUG_TRAP(atomic_read(&skb->users) == 0);
+           BUG_TRAP(!atomic_read(&skb->users));
            __kfree_skb(skb);
        }
    }
...
@@ -1365,7 +1362,7 @@ static void net_tx_action(struct softirq_action *h)
        softnet_data[cpu].output_queue = NULL;
        local_irq_enable();

-       while (head != NULL) {
+       while (head) {
            struct net_device *dev = head;
            head = head->next_sched;
...
@@ -1389,7 +1386,6 @@ static void net_tx_action(struct softirq_action *h)
 *  Make a function call that is atomic with respect to the protocol
 *  layers.
 */
void net_call_rx_atomic(void (*fn)(void))
{
    br_write_lock_bh(BR_NETPROTO_LOCK);
...
@@ -1421,11 +1417,12 @@ static __inline__ int handle_bridge(struct sk_buff *skb,
#ifdef CONFIG_NET_DIVERT
-static inline void handle_diverter(struct sk_buff *skb)
+static inline int handle_diverter(struct sk_buff *skb)
{
    /* if diversion is supported on device, then divert */
    if (skb->dev->divert && skb->dev->divert->divert)
        divert_frame(skb);
+   return 0;
}
#endif   /* CONFIG_NET_DIVERT */
...
@@ -1435,7 +1432,7 @@ int netif_receive_skb(struct sk_buff *skb)
    int ret = NET_RX_DROP;
    unsigned short type = skb->protocol;

-   if (skb->stamp.tv_sec == 0)
+   if (!skb->stamp.tv_sec)
        do_gettimeofday(&skb->stamp);

    skb_bond(skb);
...
@@ -1456,10 +1453,12 @@ int netif_receive_skb(struct sk_buff *skb)
        if (!ptype->dev || ptype->dev == skb->dev) {
            if (pt_prev) {
                if (!pt_prev->data) {
                    ret = deliver_to_old_ones(pt_prev, skb, 0);
                } else {
                    atomic_inc(&skb->users);
                    ret = pt_prev->func(skb, skb->dev, pt_prev);
                }
            }
            pt_prev = ptype;
...
@@ -1472,21 +1471,22 @@ int netif_receive_skb(struct sk_buff *skb)
#endif /* CONFIG_NET_DIVERT */

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-   if (skb->dev->br_port != NULL &&
-       br_handle_frame_hook != NULL) {
+   if (skb->dev->br_port && br_handle_frame_hook) {
        return handle_bridge(skb, pt_prev);
    }
#endif

    for (ptype = ptype_base[ntohs(type) & 15]; ptype; ptype = ptype->next) {
        if (ptype->type == type &&
            (!ptype->dev || ptype->dev == skb->dev)) {
            if (pt_prev) {
                if (!pt_prev->data) {
                    ret = deliver_to_old_ones(pt_prev, skb, 0);
                } else {
                    atomic_inc(&skb->users);
                    ret = pt_prev->func(skb, skb->dev, pt_prev);
                }
            }
            pt_prev = ptype;
...
@@ -1524,7 +1524,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
@@ -1524,7 +1524,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
...
@@ -1524,7 +1524,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
local_irq_disable
();
local_irq_disable
();
skb
=
__skb_dequeue
(
&
queue
->
input_pkt_queue
);
skb
=
__skb_dequeue
(
&
queue
->
input_pkt_queue
);
if
(
skb
==
NULL
)
if
(
!
skb
)
goto
job_done
;
goto
job_done
;
local_irq_enable
();
local_irq_enable
();
...
@@ -1540,7 +1540,8 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
...
@@ -1540,7 +1540,8 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
break
;
break
;
#ifdef CONFIG_NET_HW_FLOWCONTROL
#ifdef CONFIG_NET_HW_FLOWCONTROL
if
(
queue
->
throttle
&&
queue
->
input_pkt_queue
.
qlen
<
no_cong_thresh
)
{
if
(
queue
->
throttle
&&
queue
->
input_pkt_queue
.
qlen
<
no_cong_thresh
)
{
if
(
atomic_dec_and_test
(
&
netdev_dropping
))
{
if
(
atomic_dec_and_test
(
&
netdev_dropping
))
{
queue
->
throttle
=
0
;
queue
->
throttle
=
0
;
netdev_wakeup
();
netdev_wakeup
();
...
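process_backlog() and net_rx_action(), touched in the hunks above and below, share one idea: each device gets a quota and the whole softirq gets a budget, and polling stops when either runs out. The following user-space sketch models that accounting; the structure, queue sizes and quotas are invented for illustration.

/* toy_poll.c -- budget/quota model of the rx softirq loop */
#include <stdio.h>

struct toy_dev {
	const char *name;
	int pending;	/* packets waiting */
	int quota;	/* per-device allowance, like dev->quota */
};

/* Mimics dev->poll(): consumes up to *budget and its own quota,
 * returns nonzero if work is left over. */
static int toy_poll(struct toy_dev *dev, int *budget)
{
	int work = dev->pending;

	if (work > *budget)
		work = *budget;
	if (work > dev->quota)
		work = dev->quota;

	dev->pending -= work;
	dev->quota   -= work;
	*budget      -= work;

	return dev->pending != 0;
}

int main(void)
{
	struct toy_dev devs[] = { { "eth0", 50, 64 }, { "eth1", 120, 64 } };
	int budget = 100;

	for (int i = 0; i < 2 && budget > 0; i++)
		if (toy_poll(&devs[i], &budget))
			printf("%s still has %d packets queued\n",
			       devs[i].name, devs[i].pending);

	printf("budget left: %d\n", budget);
	return 0;
}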
@@ -1590,7 +1591,8 @@ static void net_rx_action(struct softirq_action *h)
 		local_irq_enable();

-		dev = list_entry(queue->poll_list.next, struct net_device, poll_list);
+		dev = list_entry(queue->poll_list.next,
+				 struct net_device, poll_list);

 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
 			local_irq_disable();

@@ -1605,7 +1607,7 @@ static void net_rx_action(struct softirq_action *h)
 			local_irq_disable();
 		}
 	}
-
+out:
 	local_irq_enable();
 	br_read_unlock(BR_NETPROTO_LOCK);
 	return;

@@ -1613,9 +1615,7 @@ static void net_rx_action(struct softirq_action *h)
 softnet_break:
 	netdev_rx_stat[this_cpu].time_squeeze++;
 	__cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
-
-	local_irq_enable();
-	br_read_unlock(BR_NETPROTO_LOCK);
+	goto out;
 }

 static gifconf_func_t * gifconf_list [NPROTO];

@@ -1629,10 +1629,9 @@ static gifconf_func_t * gifconf_list [NPROTO];
  *	that is passed must not be freed or reused until it has been replaced
  *	by another handler.
  */
 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
 {
 	if (family >= NPROTO)
 		return -EINVAL;
-
 	gifconf_list[family] = gifconf;
 	return 0;
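register_gifconf() simply stores a per-address-family callback that dev_ifconf() later invokes once per device, first with a NULL buffer to size the answer and then with a real buffer. As a hedged kernel-side sketch of a caller, the handler body and the choice of family below are invented; only register_gifconf() and its gifconf_func_t signature come from the code shown here.

/* Sketch only: illustrative gifconf handler registration. */
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/uaccess.h>

/* Called once per device: with buf == NULL it only reports how much
 * space it would need, otherwise it writes at most len bytes and
 * returns how many it used. */
static int example_gifconf(struct net_device *dev, char *buf, int len)
{
	struct ifreq ifr;

	if (!buf)
		return sizeof(struct ifreq);		/* sizing pass */
	if (len < sizeof(struct ifreq))
		return 0;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev->name, IFNAMSIZ);
	if (copy_to_user(buf, &ifr, sizeof(ifr)))
		return -EFAULT;
	return sizeof(ifr);
}

static int __init example_gifconf_init(void)
{
	/* PF_INET is already taken by devinet.c; the family here is only
	 * an example. Module wiring is omitted. */
	return register_gifconf(PF_DECnet, example_gifconf);
}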
@@ -1707,18 +1706,17 @@ static int dev_ifconf(char *arg)
 	 */
 	total = 0;
-	for (dev = dev_base; dev != NULL; dev = dev->next) {
+	for (dev = dev_base; dev; dev = dev->next) {
 		for (i = 0; i < NPROTO; i++) {
 			if (gifconf_list[i]) {
 				int done;
-				if (pos == NULL) {
+				if (!pos)
 					done = gifconf_list[i](dev, NULL, 0);
-				} else {
-					done = gifconf_list[i](dev, pos + total, len - total);
-				}
-				if (done < 0) {
+				else
+					done = gifconf_list[i](dev, pos + total,
+							       len - total);
+				if (done < 0)
 					return -EFAULT;
-				}
 				total += done;
 			}
 		}

@@ -1729,13 +1727,10 @@ static int dev_ifconf(char *arg)
 	 */
 	ifc.ifc_len = total;
-	if (copy_to_user(arg, &ifc, sizeof(struct ifconf)))
-		return -EFAULT;

 	/*
 	 * 	Both BSD and Solaris return 0 here, so we do too.
 	 */
-	return 0;
+	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
 }

 /*
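For reference, this is the user-space side of dev_ifconf(): a SIOCGIFCONF caller hands in a buffer and gets ifc_len rewritten to the number of bytes actually filled. The buffer size below is an arbitrary choice for the example.

/* list_ifs.c -- minimal SIOCGIFCONF caller */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq reqs[16];
	struct ifconf ifc;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifc, 0, sizeof(ifc));
	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;

	/* dev_ifconf() fills the buffer and rewrites ifc_len. */
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
		close(fd);
		return 1;
	}

	for (size_t i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
		printf("%s\n", reqs[i].ifr_name);

	close(fd);
	return 0;
}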
@@ -1747,36 +1742,39 @@ static int dev_ifconf(char *arg)
 static int sprintf_stats(char *buffer, struct net_device *dev)
 {
-	struct net_device_stats *stats = (dev->get_stats ? dev->get_stats(dev) : NULL);
+	struct net_device_stats *stats = dev->get_stats ? dev->get_stats(dev) : NULL;
 	int size;

 	if (stats)
-		size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu %8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+		size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu "
+				       "%10lu %9lu %8lu %7lu %4lu %4lu %4lu "
+				       "%5lu %7lu %10lu\n",
 			       dev->name,
 			       stats->rx_bytes,
 			       stats->rx_packets, stats->rx_errors,
 			       stats->rx_dropped + stats->rx_missed_errors,
 			       stats->rx_fifo_errors,
-			       stats->rx_length_errors + stats->rx_over_errors
-			       + stats->rx_crc_errors + stats->rx_frame_errors,
+			       stats->rx_length_errors + stats->rx_over_errors +
+			         stats->rx_crc_errors + stats->rx_frame_errors,
 			       stats->rx_compressed, stats->multicast,
 			       stats->tx_bytes,
 			       stats->tx_packets, stats->tx_errors, stats->tx_dropped,
 			       stats->tx_fifo_errors, stats->collisions,
-			       stats->tx_carrier_errors + stats->tx_aborted_errors
-			       + stats->tx_window_errors + stats->tx_heartbeat_errors,
+			       stats->tx_carrier_errors + stats->tx_aborted_errors +
+			         stats->tx_window_errors + stats->tx_heartbeat_errors,
 			       stats->tx_compressed);
 	else
 		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

 	return size;
 }

 /*
- *	Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
- *	to create /proc/net/dev
+ *	Called from the PROCfs module. This now uses the new arbitrary sized
+ *	/proc/net interface to create /proc/net/dev
  */
 static int dev_get_info(char *buffer, char **start, off_t offset, int length)
 {
 	int len = 0;
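sprintf_stats() and dev_get_info() are what generate /proc/net/dev, so the natural usage example is a reader of that file. The sketch below only parses the first few columns; the column layout matches the header lines that dev_get_info() prints.

/* read_netdev.c -- minimal /proc/net/dev reader */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/dev", "r");

	if (!f)
		return 1;

	/* Skip the two header lines printed by dev_get_info(). */
	fgets(line, sizeof(line), f);
	fgets(line, sizeof(line), f);

	while (fgets(line, sizeof(line), f)) {
		char name[32];
		unsigned long rx_bytes, rx_packets;

		/* First three columns: "name: rx_bytes rx_packets ..." */
		if (sscanf(line, " %31[^:]: %lu %lu",
			   name, &rx_bytes, &rx_packets) == 3)
			printf("%-8s rx_bytes=%lu rx_packets=%lu\n",
			       name, rx_bytes, rx_packets);
	}
	fclose(f);
	return 0;
}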
@@ -1785,7 +1783,6 @@ static int dev_get_info(char *buffer, char **start, off_t offset, int length)
 	int size;
 	struct net_device *dev;

 	size = sprintf(buffer,
 		       "Inter-|   Receive                      |  Transmit\n"
 		       " face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n");

@@ -1793,9 +1790,8 @@ static int dev_get_info(char *buffer, char **start, off_t offset, int length)
 	pos += size;
 	len += size;

 	read_lock(&dev_base_lock);
-	for (dev = dev_base; dev != NULL; dev = dev->next) {
+	for (dev = dev_base; dev; dev = dev->next) {
 		size = sprintf_stats(buffer + len, dev);
 		len += size;
 		pos = begin + len;

@@ -1810,7 +1806,7 @@ static int dev_get_info(char *buffer, char **start, off_t offset, int length)
 	read_unlock(&dev_base_lock);

 	*start = buffer + (offset - begin);	/* Start of wanted data */
-	len -= (offset - begin);		/* Start slop */
+	len -= offset - begin;			/* Start slop */
 	if (len > length)
 		len = length;			/* Ending slop */
 	if (len < 0)

@@ -1822,11 +1818,12 @@ static int dev_proc_stats(char *buffer, char **start, off_t offset,
 			  int length, int *eof, void *data)
 {
 	int i, lcpu;
 	int len = 0;

 	for (lcpu = 0; lcpu < smp_num_cpus; lcpu++) {
 		i = cpu_logical_map(lcpu);
-		len += sprintf(buffer + len, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+		len += sprintf(buffer + len, "%08x %08x %08x %08x %08x %08x "
+			       "%08x %08x %08x\n",
 			       netdev_rx_stat[i].total,
 			       netdev_rx_stat[i].dropped,
 			       netdev_rx_stat[i].time_squeeze,

@@ -1870,7 +1867,6 @@ static int dev_proc_stats(char *buffer, char **start, off_t offset,
  *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
  *	function returns zero.
  */
-
 int netdev_set_master(struct net_device *slave, struct net_device *master)
 {
 	struct net_device *old = slave->master;
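netdev_set_master() is the hook the bonding driver (the other subject of this commit) uses to attach and detach slave interfaces. The sketch below shows the expected call pattern under the rtnl lock mentioned in the kerneldoc above; the wrapper function names and reduced error handling are purely illustrative.

/* Sketch only: enslave/release around netdev_set_master(). */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	rtnl_lock();				/* netdev_set_master() expects rtnl held */
	err = netdev_set_master(slave, master);	/* attach: slave->master = master */
	rtnl_unlock();
	return err;
}

static void example_release(struct net_device *slave)
{
	rtnl_lock();
	netdev_set_master(slave, NULL);		/* detach again */
	rtnl_unlock();
}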
@@ -1909,7 +1905,6 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
  *	the device reverts back to normal filtering operation. A negative inc
  *	value is used to drop promiscuity on the device.
  */
-
 void dev_set_promiscuity(struct net_device *dev, int inc)
 {
 	unsigned short old_flags = dev->flags;

@@ -1917,9 +1912,9 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
 	dev->flags |= IFF_PROMISC;
 	if ((dev->promiscuity += inc) == 0)
 		dev->flags &= ~IFF_PROMISC;
 	if (dev->flags ^ old_flags) {
 #ifdef CONFIG_NET_FASTROUTE
 		if (dev->flags & IFF_PROMISC) {
 			netdev_fastroute_obstacles++;
 			dev_clear_fastroute(dev);
 		} else

@@ -1927,7 +1922,8 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
 #endif
 		dev_mc_upload(dev);
-		printk(KERN_INFO "device %s %s promiscuous mode\n", dev->name,
-		       (dev->flags & IFF_PROMISC) ? "entered" : "left");
+		printk(KERN_INFO "device %s %s promiscuous mode\n",
+		       dev->name,
+		       (dev->flags & IFF_PROMISC) ? "entered" : "left");
 	}
 }

@@ -1950,7 +1946,7 @@ void dev_set_allmulti(struct net_device *dev, int inc)
 	dev->flags |= IFF_ALLMULTI;
 	if ((dev->allmulti += inc) == 0)
 		dev->flags &= ~IFF_ALLMULTI;
 	if (dev->flags ^ old_flags)
 		dev_mc_upload(dev);
 }

@@ -1963,9 +1959,11 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	 *	Set the flags on our device.
 	 */
-	dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_NOARP|IFF_DYNAMIC|
-			       IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) |
-		     (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI));
+	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
+			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
+			       IFF_AUTOMEDIA)) |
+		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
+				    IFF_ALLMULTI));

 	/*
 	 *	Load in the correct multicast list now the flags have changed.

@@ -1980,20 +1978,20 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	 */
 	ret = 0;
-	if ((old_flags ^ flags) & IFF_UP)	/* Bit is different  ? */
-	{
+	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different ? */
 		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

-		if (ret == 0)
+		if (!ret)
 			dev_mc_upload(dev);
 	}

 	if (dev->flags & IFF_UP &&
 	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
 					  IFF_VOLATILE)))
 		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);

 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
 		int inc = (flags & IFF_PROMISC) ? +1 : -1;
 		dev->gflags ^= IFF_PROMISC;
 		dev_set_promiscuity(dev, inc);
 	}

@@ -2002,14 +2000,14 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	   is important. Some (broken) drivers set IFF_PROMISC, when
 	   IFF_ALLMULTI is requested not asking us and not reporting.
 	 */
 	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
 		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
 		dev->gflags ^= IFF_ALLMULTI;
 		dev_set_allmulti(dev, inc);
 	}

 	if (old_flags ^ dev->flags)
 		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);

 	return ret;
 }
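dev_set_promiscuity() and dev_set_allmulti() are counted interfaces: the flag is only cleared when the counter returns to zero, so every +1 must eventually be balanced by a -1. A hedged kernel-side sketch of a well-behaved caller follows; the wrapper names are illustrative and error handling is omitted.

/* Sketch only: balanced use of the counted promisc/allmulti helpers. */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void example_start_sniffing(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* bump dev->promiscuity */
	dev_set_allmulti(dev, 1);	/* bump dev->allmulti */
	rtnl_unlock();
}

static void example_stop_sniffing(struct net_device *dev)
{
	rtnl_lock();
	dev_set_allmulti(dev, -1);	/* drop our references again */
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}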
@@ -2017,20 +2015,21 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 /*
  *	Perform the SIOCxIFxxx calls.
  */
 static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 {
-	struct net_device *dev;
 	int err;
+	struct net_device *dev = __dev_get_by_name(ifr->ifr_name);

-	if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL)
+	if (!dev)
 		return -ENODEV;

-	switch (cmd)
-	{
+	switch (cmd) {
 		case SIOCGIFFLAGS:	/* Get interface flags */
-			ifr->ifr_flags = (dev->flags &~ (IFF_PROMISC|IFF_ALLMULTI|IFF_RUNNING))
-					 | (dev->gflags & (IFF_PROMISC|IFF_ALLMULTI));
+			ifr->ifr_flags = (dev->flags & ~(IFF_PROMISC |
+							 IFF_ALLMULTI |
+							 IFF_RUNNING)) |
+					 (dev->gflags & (IFF_PROMISC |
+							 IFF_ALLMULTI));
 			if (netif_running(dev) && netif_carrier_ok(dev))
 				ifr->ifr_flags |= IFF_RUNNING;
 			return 0;

@@ -2038,11 +2037,13 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 		case SIOCSIFFLAGS:	/* Set interface flags */
 			return dev_change_flags(dev, ifr->ifr_flags);

-		case SIOCGIFMETRIC:	/* Get the metric on the interface (currently unused) */
+		case SIOCGIFMETRIC:	/* Get the metric on the interface
+					   (currently unused) */
 			ifr->ifr_metric = 0;
 			return 0;

-		case SIOCSIFMETRIC:	/* Set the metric on the interface (currently unused) */
+		case SIOCSIFMETRIC:	/* Set the metric on the interface
+					   (currently unused) */
 			return -EOPNOTSUPP;

 		case SIOCGIFMTU:	/* Get the MTU of a device */

@@ -2056,80 +2057,85 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 			/*
 			 *	MTU must be positive.
 			 */
 			if (ifr->ifr_mtu < 0)
 				return -EINVAL;
 			if (!netif_device_present(dev))
 				return -ENODEV;
+			err = 0;
 			if (dev->change_mtu)
 				err = dev->change_mtu(dev, ifr->ifr_mtu);
-			else {
+			else
 				dev->mtu = ifr->ifr_mtu;
-				err = 0;
-			}
 			if (!err && dev->flags & IFF_UP)
-				notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU, dev);
+				notifier_call_chain(&netdev_chain,
+						    NETDEV_CHANGEMTU, dev);
 			return err;

 		case SIOCGIFHWADDR:
-			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, MAX_ADDR_LEN);
+			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
+			       MAX_ADDR_LEN);
 			ifr->ifr_hwaddr.sa_family = dev->type;
 			return 0;

 		case SIOCSIFHWADDR:
-			if (dev->set_mac_address == NULL)
+			if (!dev->set_mac_address)
 				return -EOPNOTSUPP;
 			if (ifr->ifr_hwaddr.sa_family != dev->type)
 				return -EINVAL;
 			if (!netif_device_present(dev))
 				return -ENODEV;
 			err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
 			if (!err)
 				notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
 			return err;

 		case SIOCSIFHWBROADCAST:
 			if (ifr->ifr_hwaddr.sa_family != dev->type)
 				return -EINVAL;
-			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, MAX_ADDR_LEN);
+			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
+			       MAX_ADDR_LEN);
 			notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
 			return 0;

 		case SIOCGIFMAP:
 			ifr->ifr_map.mem_start = dev->mem_start;
 			ifr->ifr_map.mem_end   = dev->mem_end;
 			ifr->ifr_map.base_addr = dev->base_addr;
 			ifr->ifr_map.irq       = dev->irq;
 			ifr->ifr_map.dma       = dev->dma;
 			ifr->ifr_map.port      = dev->if_port;
 			return 0;

 		case SIOCSIFMAP:
 			if (dev->set_config) {
 				if (!netif_device_present(dev))
 					return -ENODEV;
 				return dev->set_config(dev, &ifr->ifr_map);
 			}
 			return -EOPNOTSUPP;

 		case SIOCADDMULTI:
-			if (dev->set_multicast_list == NULL ||
+			if (!dev->set_multicast_list ||
 			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
 				return -EINVAL;
 			if (!netif_device_present(dev))
 				return -ENODEV;
 			dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, dev->addr_len, 1);
 			return 0;

 		case SIOCDELMULTI:
-			if (dev->set_multicast_list == NULL ||
+			if (!dev->set_multicast_list ||
 			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
 				return -EINVAL;
 			if (!netif_device_present(dev))
 				return -ENODEV;
 			dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, dev->addr_len, 1);
 			return 0;

 		case SIOCGIFINDEX:
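The SIOCGIFHWADDR branch above copies the device's MAC address and type into the caller's ifreq; from user space the same request looks like this. The interface name is just an example.

/* get_mac.c -- user-space counterpart of the SIOCGIFHWADDR branch */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	unsigned char *mac;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGIFHWADDR, &ifr) < 0) {
		close(fd);
		return 1;
	}

	mac = (unsigned char *)ifr.ifr_hwaddr.sa_data;
	printf("eth0 type=%d addr=%02x:%02x:%02x:%02x:%02x:%02x\n",
	       ifr.ifr_hwaddr.sa_family,
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	close(fd);
	return 0;
}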
@@ -2141,19 +2147,20 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 			return 0;

 		case SIOCSIFTXQLEN:
 			if (ifr->ifr_qlen < 0)
 				return -EINVAL;
 			dev->tx_queue_len = ifr->ifr_qlen;
 			return 0;

 		case SIOCSIFNAME:
 			if (dev->flags & IFF_UP)
 				return -EBUSY;
 			if (__dev_get_by_name(ifr->ifr_newname))
 				return -EEXIST;
 			memcpy(dev->name, ifr->ifr_newname, IFNAMSIZ);
 			dev->name[IFNAMSIZ - 1] = 0;
 			notifier_call_chain(&netdev_chain,
 					    NETDEV_CHANGENAME, dev);
 			return 0;

 		/*

@@ -2174,16 +2181,19 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 		    cmd == SIOCGMIIREG ||
 		    cmd == SIOCSMIIREG ||
 		    cmd == SIOCWANDEV) {
+			err = -EOPNOTSUPP;
 			if (dev->do_ioctl) {
-				if (!netif_device_present(dev))
-					return -ENODEV;
-				return dev->do_ioctl(dev, ifr, cmd);
-			} else
-				return -EOPNOTSUPP;
-		}
-	}
-	return -EINVAL;
+				if (netif_device_present(dev))
+					err = dev->do_ioctl(dev, ifr, cmd);
+				else
+					err = -ENODEV;
+			}
+		} else
+			err = -EINVAL;
+	}
+	return err;
 }

@@ -2219,9 +2229,8 @@ int dev_ioctl(unsigned int cmd, void *arg)
 		rtnl_shunlock();
 		return ret;
 	}

-	if (cmd == SIOCGIFNAME) {
+	if (cmd == SIOCGIFNAME)
 		return dev_ifname((struct ifreq *)arg);
-	}

 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;

@@ -2236,15 +2245,13 @@ int dev_ioctl(unsigned int cmd, void *arg)
 	 *	See which interface the caller is talking about.
 	 */
-	switch (cmd)
-	{
+	switch (cmd) {
 		/*
 		 *	These ioctl calls:
 		 *	- can be done by all.
 		 *	- atomic and do not require locking.
 		 *	- return a value
 		 */
 		case SIOCGIFFLAGS:
 		case SIOCGIFMETRIC:
 		case SIOCGIFMTU:

@@ -2260,8 +2267,9 @@ int dev_ioctl(unsigned int cmd, void *arg)
 			if (!ret) {
 				if (colon)
 					*colon = ':';
-				if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-					return -EFAULT;
+				if (copy_to_user(arg, &ifr,
+						 sizeof(struct ifreq)))
+					ret = -EFAULT;
 			}
 			return ret;
@@ -2271,7 +2279,6 @@ int dev_ioctl(unsigned int cmd, void *arg)
 		 *	- require strict serialization.
 		 *	- return a value
 		 */
-
 		case SIOCETHTOOL:
 		case SIOCGMIIPHY:
 		case SIOCGMIIREG:

@@ -2286,8 +2293,9 @@ int dev_ioctl(unsigned int cmd, void *arg)
 			if (!ret) {
 				if (colon)
 					*colon = ':';
-				if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-					return -EFAULT;
+				if (copy_to_user(arg, &ifr,
+						 sizeof(struct ifreq)))
+					ret = -EFAULT;
 			}
 			return ret;

@@ -2297,7 +2305,6 @@ int dev_ioctl(unsigned int cmd, void *arg)
 		 *	- require strict serialization.
 		 *	- do not return a value
 		 */
-
 		case SIOCSIFFLAGS:
 		case SIOCSIFMETRIC:
 		case SIOCSIFMTU:

@@ -2327,17 +2334,17 @@ int dev_ioctl(unsigned int cmd, void *arg)
 			return ret;

 		case SIOCGIFMEM:
-			/* Get the per device memory space. We can add this but currently
-			   do not support it */
+			/* Get the per device memory space. We can add this but
+			 * currently do not support it */
 		case SIOCSIFMEM:
-			/* Set the per device memory buffer space. Not applicable in our case */
+			/* Set the per device memory buffer space.
+			 * Not applicable in our case */
 		case SIOCSIFLINK:
 			return -EINVAL;

 		/*
 		 *	Unknown or private ioctl.
 		 */
 		default:
 			if (cmd == SIOCWANDEV ||
 			    (cmd >= SIOCDEVPRIVATE &&

@@ -2348,8 +2355,9 @@ int dev_ioctl(unsigned int cmd, void *arg)
 				ret = dev_ifsioc(&ifr, cmd);
 				rtnl_unlock();
 				dev_probe_unlock();
-				if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-					return -EFAULT;
+				if (!ret && copy_to_user(arg, &ifr,
+							 sizeof(struct ifreq)))
+					ret = -EFAULT;
 				return ret;
 			}
 #ifdef WIRELESS_EXT

@@ -2358,8 +2366,8 @@ int dev_ioctl(unsigned int cmd, void *arg)
 			/* If command is `set a parameter', or
 			 * `get the encoding parameters', check if
 			 * the user has the right to do it */
-			if (IW_IS_SET(cmd) || (cmd == SIOCGIWENCODE)) {
+			if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
 				if (!capable(CAP_NET_ADMIN))
 					return -EPERM;
 			}
 			dev_load(ifr.ifr_name);

@@ -2368,8 +2376,9 @@ int dev_ioctl(unsigned int cmd, void *arg)
 			ret = wireless_process_ioctl(&ifr, cmd);
 			rtnl_unlock();
 			if (!ret && IW_IS_GET(cmd) &&
-			    copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-				return -EFAULT;
+			    copy_to_user(arg, &ifr,
+					 sizeof(struct ifreq)))
+				ret = -EFAULT;
 			return ret;
 		}
 #endif	/* WIRELESS_EXT */

@@ -2385,14 +2394,13 @@ int dev_ioctl(unsigned int cmd, void *arg)
  *	number.  The caller must hold the rtnl semaphore or the
  *	dev_base_lock to be sure it remains unique.
  */
-
 int dev_new_index(void)
 {
 	static int ifindex;
 	for (;;) {
 		if (++ifindex <= 0)
 			ifindex = 1;
-		if (__dev_get_by_index(ifindex) == NULL)
+		if (!__dev_get_by_index(ifindex))
 			return ifindex;
 	}
 }

@@ -2422,15 +2430,13 @@ int net_dev_init(void);
 int register_netdevice(struct net_device *dev)
 {
 	struct net_device *d, **dp;
-#ifdef CONFIG_NET_DIVERT
 	int ret;
-#endif

 	spin_lock_init(&dev->queue_lock);
 	spin_lock_init(&dev->xmit_lock);
 	dev->xmit_lock_owner = -1;
 #ifdef CONFIG_NET_FASTROUTE
 	dev->fastpath_lock = RW_LOCK_UNLOCKED;
 #endif

 	if (dev_boot_phase)

@@ -2439,38 +2445,32 @@ int register_netdevice(struct net_device *dev)
 #ifdef CONFIG_NET_DIVERT
 	ret = alloc_divert_blk(dev);
 	if (ret)
-		return ret;
+		goto out;
 #endif /* CONFIG_NET_DIVERT */

 	dev->iflink = -1;

 	/* Init, if this function is available */
-	if (dev->init && dev->init(dev) != 0) {
-#ifdef CONFIG_NET_DIVERT
-		free_divert_blk(dev);
-#endif
-		return -EIO;
-	}
+	ret = -EIO;
+	if (dev->init && dev->init(dev))
+		goto out_err;

 	dev->ifindex = dev_new_index();
 	if (dev->iflink == -1)
 		dev->iflink = dev->ifindex;

 	/* Check for existence, and append to tail of chain */
-	for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
-		if (d == dev || strcmp(d->name, dev->name) == 0) {
-#ifdef CONFIG_NET_DIVERT
-			free_divert_blk(dev);
-#endif
-			return -EEXIST;
-		}
-	}
+	ret = -EEXIST;
+	for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
+		if (d == dev || !strcmp(d->name, dev->name))
+			goto out_err;
+	}
 	/*
 	 *	nil rebuild_header routine,
 	 *	that should be never called and used as just bug trap.
 	 */
-	if (dev->rebuild_header == NULL)
+	if (!dev->rebuild_header)
 		dev->rebuild_header = default_rebuild_header;

 	/*

@@ -2492,8 +2492,15 @@ int register_netdevice(struct net_device *dev)
 	notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
 	net_run_sbin_hotplug(dev, "register");

-	return 0;
+	ret = 0;
+
+out:
+	return ret;
+
+out_err:
+#ifdef CONFIG_NET_DIVERT
+	free_divert_blk(dev);
+#endif
+	goto out;
 }

 /**
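register_netdevice() now funnels all failures through out_err, so a caller only needs to look at the return value. The hedged sketch below shows the minimal shape of a driver that registers and unregisters a device under the rtnl lock; the device name, handlers and lack of real setup are illustrative only.

/* Sketch only: a stripped-down register_netdevice() caller. */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static struct net_device example_dev;	/* static, so it starts zeroed */

static int example_open(struct net_device *dev) { return 0; }
static int example_stop(struct net_device *dev) { return 0; }

static int example_probe(void)
{
	int err;

	strcpy(example_dev.name, "dummy0");	/* fixed example name */
	example_dev.open = example_open;
	example_dev.stop = example_stop;

	rtnl_lock();
	err = register_netdevice(&example_dev);	/* -EIO, -EEXIST, ... on failure */
	rtnl_unlock();
	return err;
}

static void example_remove(void)
{
	rtnl_lock();
	unregister_netdevice(&example_dev);
	rtnl_unlock();
}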
@@ -2503,15 +2510,15 @@ int register_netdevice(struct net_device *dev)
 *	Destroy and free a dead device. A value of zero is returned on
 *	success.
 */
 int netdev_finish_unregister(struct net_device *dev)
 {
-	BUG_TRAP(dev->ip_ptr == NULL);
-	BUG_TRAP(dev->ip6_ptr == NULL);
-	BUG_TRAP(dev->dn_ptr == NULL);
+	BUG_TRAP(!dev->ip_ptr);
+	BUG_TRAP(!dev->ip6_ptr);
+	BUG_TRAP(!dev->dn_ptr);

 	if (!dev->deadbeaf) {
-		printk(KERN_ERR "Freeing alive device %p, %s\n", dev, dev->name);
+		printk(KERN_ERR "Freeing alive device %p, %s\n",
+		       dev, dev->name);
 		return 0;
 	}
 #ifdef NET_REFCNT_DEBUG

@@ -2547,11 +2554,11 @@ int unregister_netdevice(struct net_device *dev)
 	if (dev->flags & IFF_UP)
 		dev_close(dev);

-	BUG_TRAP(dev->deadbeaf == 0);
+	BUG_TRAP(!dev->deadbeaf);
 	dev->deadbeaf = 1;

 	/* And unlink it from device chain. */
-	for (dp = &dev_base; (d=*dp) != NULL; dp = &d->next) {
+	for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
 		if (d == dev) {
 			write_lock_bh(&dev_base_lock);
 			*dp = d->next;

@@ -2559,8 +2566,9 @@ int unregister_netdevice(struct net_device *dev)
 			break;
 		}
 	}
-	if (d == NULL) {
-		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never was registered\n", dev->name, dev);
+	if (!d) {
+		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
+				  "was registered\n", dev->name, dev);
 		return -ENODEV;
 	}

@@ -2568,7 +2576,7 @@ int unregister_netdevice(struct net_device *dev)
 	br_write_lock_bh(BR_NETPROTO_LOCK);
 	br_write_unlock_bh(BR_NETPROTO_LOCK);

-	if (dev_boot_phase == 0) {
+	if (!dev_boot_phase) {
 #ifdef CONFIG_NET_FASTROUTE
 		dev_clear_fastroute(dev);
 #endif

@@ -2593,7 +2601,7 @@ int unregister_netdevice(struct net_device *dev)
 		dev->uninit(dev);

 	/* Notifier chain MUST detach us from master device. */
-	BUG_TRAP(dev->master == NULL);
+	BUG_TRAP(!dev->master);

 #ifdef CONFIG_NET_DIVERT
 	free_divert_blk(dev);

@@ -2602,20 +2610,20 @@ int unregister_netdevice(struct net_device *dev)
 	if (dev->features & NETIF_F_DYNALLOC) {
 #ifdef NET_REFCNT_DEBUG
 		if (atomic_read(&dev->refcnt) != 1)
-			printk(KERN_DEBUG "unregister_netdevice: holding %s refcnt=%d\n", dev->name, atomic_read(&dev->refcnt)-1);
+			printk(KERN_DEBUG "unregister_netdevice: holding %s "
+					  "refcnt=%d\n",
+			       dev->name, atomic_read(&dev->refcnt) - 1);
 #endif
-		dev_put(dev);
-		return 0;
+		goto out;
 	}

 	/* Last reference is our one */
-	if (atomic_read(&dev->refcnt) == 1) {
-		dev_put(dev);
-		return 0;
-	}
+	if (atomic_read(&dev->refcnt) == 1)
+		goto out;

 #ifdef NET_REFCNT_DEBUG
-	printk("unregister_netdevice: waiting %s refcnt=%d\n", dev->name, atomic_read(&dev->refcnt));
+	printk(KERN_DEBUG "unregister_netdevice: waiting %s refcnt=%d\n",
+	       dev->name, atomic_read(&dev->refcnt));
 #endif

@@ -2623,14 +2631,15 @@ int unregister_netdevice(struct net_device *dev)
 	   to this device and we cannot release it.

 	   "New style" devices have destructors, hence we can return from this
-	   function and destructor will do all the work later.  As of kernel 2.4.0
-	   there are very few "New Style" devices.
+	   function and destructor will do all the work later.  As of kernel
+	   2.4.0 there are very few "New Style" devices.

 	   "Old style" devices expect that the device is free of any references
 	   upon exit from this function.

 	   We cannot return from this function until all such references have
-	   fallen away.  This is because the caller of this function will probably
-	   immediately kfree(*dev) and then be unloaded via sys_delete_module.
+	   fallen away.  This is because the caller of this function will
+	   probably immediately kfree(*dev) and then be unloaded via
+	   sys_delete_module.

 	   So, we linger until all references fall away.  The duration of the
 	   linger is basically unbounded! It is driven by, for example, the

@@ -2643,20 +2652,22 @@ int unregister_netdevice(struct net_device *dev)
 	now = warning_time = jiffies;
 	while (atomic_read(&dev->refcnt) != 1) {
 		if ((jiffies - now) > 1 * HZ) {
 			/* Rebroadcast unregister notification */
 			notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
 		}
 		current->state = TASK_INTERRUPTIBLE;
 		schedule_timeout(HZ / 4);
 		current->state = TASK_RUNNING;
 		if ((jiffies - warning_time) > 10 * HZ) {
-			printk(KERN_EMERG "unregister_netdevice: waiting for %s to "
-			       "become free. Usage count = %d\n",
+			printk(KERN_EMERG "unregister_netdevice: waiting for "
+			       "%s to become free. Usage count = %d\n",
 			       dev->name, atomic_read(&dev->refcnt));
 			warning_time = jiffies;
 		}
 	}
+out:
 	dev_put(dev);
 	return 0;
 }
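The wait loop above spins until every outstanding reference to the device has been dropped. Those references come from dev_hold()/dev_put() pairs (or lookups that take a reference implicitly), as in the hedged sketch below; the wrapper names are illustrative, only dev_get_by_name() and dev_put() are taken as given.

/* Sketch only: the reference discipline that unregister waits on. */
#include <linux/netdevice.h>

static struct net_device *example_grab(const char *name)
{
	/* dev_get_by_name() returns with a reference already held. */
	return dev_get_by_name(name);	/* may be NULL */
}

static void example_release(struct net_device *dev)
{
	if (dev)
		dev_put(dev);	/* lets unregister_netdevice() complete */
}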
@@ -2770,7 +2781,7 @@ int __init net_dev_init(void)
 		dev->ifindex = dev_new_index();
 		if (dev->iflink == -1)
 			dev->iflink = dev->ifindex;
-		if (dev->rebuild_header == NULL)
+		if (!dev->rebuild_header)
 			dev->rebuild_header = default_rebuild_header;
 		dev_init_scheduler(dev);
 		set_bit(__LINK_STATE_PRESENT, &dev->state);
net/ipv4/af_inet.c
@@ -21,30 +21,34 @@
 *					so sockets that fail to connect
 *					don't return -EINPROGRESS.
 *		Alan Cox	:	Asynchronous I/O support
- *		Alan Cox	:	Keep correct socket pointer on sock structures
+ *		Alan Cox	:	Keep correct socket pointer on sock
+ *					structures
 *					when accept() ed
- *		Alan Cox	:	Semantics of SO_LINGER aren't state moved
- *					to close when you look carefully. With
- *					this fixed and the accept bug fixed
+ *		Alan Cox	:	Semantics of SO_LINGER aren't state
+ *					moved to close when you look carefully.
+ *					With this fixed and the accept bug fixed
 *					some RPC stuff seems happier.
 *		Niibe Yutaka	:	4.4BSD style write async I/O
 *		Alan Cox,
 *		Tony Gale	:	Fixed reuse semantics.
 *		Alan Cox	:	bind() shouldn't abort existing but dead
 *					sockets. Stops FTP netin:.. I hope.
- *		Alan Cox	:	bind() works correctly for RAW sockets. Note
- *					that FreeBSD at least was broken in this respect
- *					so be careful with compatibility tests...
+ *		Alan Cox	:	bind() works correctly for RAW sockets.
+ *					Note that FreeBSD at least was broken
+ *					in this respect so be careful with
+ *					compatibility tests...
 *		Alan Cox	:	routing cache support
- *		Alan Cox	:	memzero the socket structure for compactness.
+ *		Alan Cox	:	memzero the socket structure for
+ *					compactness.
 *		Matt Day	:	nonblock connect error handler
 *		Alan Cox	:	Allow large numbers of pending sockets
 *					(eg for big web sites), but only if
 *					specifically application requested.
- *		Alan Cox	:	New buffering throughout IP. Used dumbly.
+ *		Alan Cox	:	New buffering throughout IP. Used
+ *					dumbly.
 *		Alan Cox	:	New buffering now used smartly.
- *		Alan Cox	:	BSD rather than common sense interpretation of
- *					listen.
+ *		Alan Cox	:	BSD rather than common sense
+ *					interpretation of listen.
 *		Germano Caronni	:	Assorted small races.
 *		Alan Cox	:	sendmsg/recvmsg basic support.
 *		Alan Cox	:	Only sendmsg/recvmsg now supported.

@@ -117,7 +121,7 @@
 #include <linux/wireless.h>		/* Note : will define WIRELESS_EXT */
 #endif	/* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */

 struct linux_mib net_statistics[NR_CPUS * 2];

 #ifdef INET_REFCNT_DEBUG
 atomic_t inet_sock_nr;

@@ -132,7 +136,7 @@ extern int udp_get_info(char *, char **, off_t, int);
 extern void ip_mc_drop_socket(struct sock *sk);

 #ifdef CONFIG_DLCI
 extern int dlci_ioctl(unsigned int, void *);
 #endif

 #ifdef CONFIG_DLCI_MODULE

@@ -177,17 +181,18 @@ void inet_sock_destruct(struct sock *sk)
 		return;
 	}

-	BUG_TRAP(atomic_read(&sk->rmem_alloc) == 0);
-	BUG_TRAP(atomic_read(&sk->wmem_alloc) == 0);
-	BUG_TRAP(sk->wmem_queued == 0);
-	BUG_TRAP(sk->forward_alloc == 0);
+	BUG_TRAP(!atomic_read(&sk->rmem_alloc));
+	BUG_TRAP(!atomic_read(&sk->wmem_alloc));
+	BUG_TRAP(!sk->wmem_queued);
+	BUG_TRAP(!sk->forward_alloc);

 	if (inet->opt)
 		kfree(inet->opt);
 	dst_release(sk->dst_cache);
 #ifdef INET_REFCNT_DEBUG
 	atomic_dec(&inet_sock_nr);
-	printk(KERN_DEBUG "INET socket %p released, %d are still alive\n", sk, atomic_read(&inet_sock_nr));
+	printk(KERN_DEBUG "INET socket %p released, %d are still alive\n",
+	       sk, atomic_read(&inet_sock_nr));
 #endif
 }

@@ -221,9 +226,9 @@ void inet_sock_release(struct sock *sk)
 	sock_orphan(sk);

 #ifdef INET_REFCNT_DEBUG
-	if (atomic_read(&sk->refcnt) != 1) {
-		printk(KERN_DEBUG "Destruction inet %p delayed, c=%d\n", sk, atomic_read(&sk->refcnt));
-	}
+	if (atomic_read(&sk->refcnt) != 1)
+		printk(KERN_DEBUG "Destruction inet %p delayed, c=%d\n",
+		       sk, atomic_read(&sk->refcnt));
 #endif
 	sock_put(sk);
 }

@@ -235,17 +240,15 @@ void inet_sock_release(struct sock *sk)
 *	the work.
 */
-
 /*
 *	Set socket options on an inet socket.
 */
-
 int inet_setsockopt(struct socket *sock, int level, int optname,
 		    char *optval, int optlen)
 {
 	struct sock *sk = sock->sk;

 	return sk->prot->setsockopt(sk, level, optname, optval, optlen);
 }

 /*

@@ -259,9 +262,9 @@ int inet_setsockopt(struct socket *sock, int level, int optname,
 int inet_getsockopt(struct socket *sock, int level, int optname,
 		    char *optval, int *optlen)
 {
 	struct sock *sk = sock->sk;

 	return sk->prot->getsockopt(sk, level, optname, optval, optlen);
 }

 /*

@@ -270,11 +273,12 @@ int inet_getsockopt(struct socket *sock, int level, int optname,
 static int inet_autobind(struct sock *sk)
 {
-	struct inet_opt *inet = inet_sk(sk);
+	struct inet_opt *inet;
 	/* We may need to bind the socket. */
 	lock_sock(sk);
+	inet = inet_sk(sk);
 	if (!inet->num) {
-		if (sk->prot->get_port(sk, 0) != 0) {
+		if (sk->prot->get_port(sk, 0)) {
 			release_sock(sk);
 			return -EAGAIN;
 		}

@@ -287,7 +291,6 @@ static int inet_autobind(struct sock *sk)
 /*
 *	Move a socket into listening state.
 */
-
 int inet_listen(struct socket *sock, int backlog)
 {
 	struct sock *sk = sock->sk;

@@ -301,7 +304,7 @@ int inet_listen(struct socket *sock, int backlog)
 		goto out;

 	old_state = sk->state;
 	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
 		goto out;

 	/* Really, if the socket is already in listen state

@@ -352,12 +355,13 @@ static int inet_create(struct socket *sock, int protocol)
 	struct list_head *p;
 	struct inet_protosw *answer;
 	struct inet_opt *inet;
+	int err = -ENOBUFS;

 	sock->state = SS_UNCONNECTED;
 	sk = sk_alloc(PF_INET, GFP_KERNEL, inet_sk_size(protocol),
 		      inet_sk_slab(protocol));
-	if (sk == NULL)
-		goto do_oom;
+	if (!sk)
+		goto out;

 	/* Look for the requested type/protocol pair. */
 	answer = NULL;

@@ -382,13 +386,16 @@ static int inet_create(struct socket *sock, int protocol)
 	}
 	br_read_unlock_bh(BR_NETPROTO_LOCK);

+	err = -ESOCKTNOSUPPORT;
 	if (!answer)
-		goto free_and_badtype;
+		goto out_sk_free;
+	err = -EPERM;
 	if (answer->capability > 0 && !capable(answer->capability))
-		goto free_and_badperm;
+		goto out_sk_free;
+	err = -EPROTONOSUPPORT;
 	if (!protocol)
-		goto free_and_noproto;
+		goto out_sk_free;
+	err = 0;

 	sock->ops = answer->ops;
 	sk->prot = answer->prot;
 	sk->no_check = answer->no_check;

@@ -410,18 +417,15 @@ static int inet_create(struct socket *sock, int protocol)
 	inet->id = 0;

 	sock_init_data(sock, sk);

 	sk->destruct = inet_sock_destruct;
 	sk->zapped = 0;
 	sk->family = PF_INET;
 	sk->protocol = protocol;
 	sk->backlog_rcv = sk->prot->backlog_rcv;

 	inet->ttl = sysctl_ip_default_ttl;
 	inet->mc_loop = 1;
 	inet->mc_ttl = 1;
 	inet->mc_index = 0;

@@ -438,34 +442,20 @@ static int inet_create(struct socket *sock, int protocol)
 	 * shares.
 	 */
 		inet->sport = htons(inet->num);
 		/* Add to protocol hash chains. */
 		sk->prot->hash(sk);
 	}

 	if (sk->prot->init) {
-		int err = sk->prot->init(sk);
-		if (err != 0) {
+		err = sk->prot->init(sk);
+		if (err)
 			inet_sock_release(sk);
-			return err;
-		}
 	}
-	return 0;
-
-free_and_badtype:
-	sk_free(sk);
-	return -ESOCKTNOSUPPORT;
-free_and_badperm:
-	sk_free(sk);
-	return -EPERM;
-free_and_noproto:
-	sk_free(sk);
-	return -EPROTONOSUPPORT;
-do_oom:
-	return -ENOBUFS;
+out:
+	return err;
+out_sk_free:
+	sk_free(sk);
+	goto out;
 }

@@ -474,7 +464,6 @@ static int inet_create(struct socket *sock, int protocol)
 *	function we are destroying the object and from then on nobody
 *	should refer to it.
 */
-
 int inet_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;

@@ -498,7 +487,7 @@ int inet_release(struct socket *sock)
 		sock->sk = NULL;
 		sk->prot->close(sk, timeout);
 	}
-	return(0);
+	return 0;
 }

 /* It is off by default, see below. */

@@ -506,19 +495,21 @@ int sysctl_ip_nonlocal_bind;
 static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 {
 	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
 	struct sock *sk = sock->sk;
 	struct inet_opt *inet = inet_sk(sk);
 	unsigned short snum;
 	int chk_addr_ret;
 	int err;

 	/* If the socket has its own bind function then use it. (RAW) */
-	if (sk->prot->bind)
-		return sk->prot->bind(sk, uaddr, addr_len);
-
+	if (sk->prot->bind) {
+		err = sk->prot->bind(sk, uaddr, addr_len);
+		goto out;
+	}
+	err = -EINVAL;
 	if (addr_len < sizeof(struct sockaddr_in))
-		return -EINVAL;
+		goto out;

 	chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);

@@ -529,17 +520,19 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	 *  (ie. your servers still start up even if your ISDN link
 	 *  is temporarily down)
 	 */
-	if (sysctl_ip_nonlocal_bind == 0 &&
-	    inet->freebind == 0 &&
+	err = -EADDRNOTAVAIL;
+	if (!sysctl_ip_nonlocal_bind &&
+	    !inet->freebind &&
 	    addr->sin_addr.s_addr != INADDR_ANY &&
 	    chk_addr_ret != RTN_LOCAL &&
 	    chk_addr_ret != RTN_MULTICAST &&
 	    chk_addr_ret != RTN_BROADCAST)
-		return -EADDRNOTAVAIL;
+		goto out;

 	snum = ntohs(addr->sin_port);
+	err = -EACCES;
 	if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
-		return -EACCES;
+		goto out;

 	/* We keep a pair of addresses. rcv_saddr is the one
 	 * used by hash lookups, and saddr is used for transmit.

@@ -553,17 +546,17 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	/* Check these errors (active socket, double bind). */
 	err = -EINVAL;
 	if (sk->state != TCP_CLOSE || inet->num)
-		goto out;
+		goto out_release_sock;

 	inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->saddr = 0;  /* Use device */

 	/* Make sure we are allowed to bind here. */
-	if (sk->prot->get_port(sk, snum) != 0) {
+	if (sk->prot->get_port(sk, snum)) {
 		inet->saddr = inet->rcv_saddr = 0;
 		err = -EADDRINUSE;
-		goto out;
+		goto out_release_sock;
 	}

 	if (inet->rcv_saddr)

@@ -575,15 +568,16 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	inet->dport = 0;
 	sk_dst_reset(sk);
 	err = 0;
-out:
+out_release_sock:
 	release_sock(sk);
+out:
 	return err;
 }
int
inet_dgram_connect
(
struct
socket
*
sock
,
struct
sockaddr
*
uaddr
,
int
inet_dgram_connect
(
struct
socket
*
sock
,
struct
sockaddr
*
uaddr
,
int
addr_len
,
int
flags
)
int
addr_len
,
int
flags
)
{
{
struct
sock
*
sk
=
sock
->
sk
;
struct
sock
*
sk
=
sock
->
sk
;
if
(
uaddr
->
sa_family
==
AF_UNSPEC
)
if
(
uaddr
->
sa_family
==
AF_UNSPEC
)
return
sk
->
prot
->
disconnect
(
sk
,
flags
);
return
sk
->
prot
->
disconnect
(
sk
,
flags
);
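In inet_bind() the early returns become jumps to one of two labels, so release_sock() runs exactly once on every path that actually took the lock. A generic user-space sketch of that paired-label pattern, with invented names:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table_busy;			/* toy shared state */

/* Two exit labels: "out_unlock" for paths that hold the lock, plain "out"
 * for paths that never took it -- mirroring out_release_sock / out above. */
static int reserve_slot(int request)
{
	int err = -EINVAL;

	if (request < 0)
		goto out;			/* lock never taken */

	pthread_mutex_lock(&table_lock);
	err = -EBUSY;
	if (table_busy)
		goto out_unlock;		/* must drop the lock */
	table_busy = 1;
	err = 0;
out_unlock:
	pthread_mutex_unlock(&table_lock);
out:
	return err;
}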
...
@@ -605,7 +599,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
 	 * Connect() does not allow to get error notifications
 	 * without closing the socket.
 	 */
 	while ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		release_sock(sk);
 		timeo = schedule_timeout(timeo);
 		lock_sock(sk);
...
@@ -622,11 +616,10 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
  *	Connect to a remote host. There is regrettably still a little
  *	TCP 'magic' in here.
  */
 int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 			int addr_len, int flags)
 {
 	struct sock *sk = sock->sk;
 	int err;
 	long timeo;
...
@@ -668,9 +661,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
 		break;
 	}

 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

 	if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		/* Error code is set above */
 		if (!timeo || !inet_wait_for_connect(sk, timeo))
 			goto out;
...
@@ -712,22 +705,22 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
 int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 {
 	struct sock *sk1 = sock->sk;
-	struct sock *sk2;
 	int err = -EINVAL;
+	struct sock *sk2 = sk1->prot->accept(sk1, flags, &err);

-	if ((sk2 = sk1->prot->accept(sk1, flags, &err)) == NULL)
+	if (!sk2)
 		goto do_err;

 	lock_sock(sk2);

 	BUG_TRAP((1 << sk2->state) &
 		 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));

 	sock_graft(sk2, newsock);

 	newsock->state = SS_CONNECTED;
+	err = 0;
 	release_sock(sk2);
-	return 0;
-
 do_err:
 	return err;
 }
...
@@ -736,7 +729,6 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
 /*
  *	This does both peername and sockname.
  */
 static int inet_getname(struct socket *sock, struct sockaddr *uaddr,
 			int *uaddr_len, int peer)
 {
...
@@ -746,9 +738,9 @@ static int inet_getname(struct socket *sock, struct sockaddr *uaddr,
 	sin->sin_family = AF_INET;
 	if (peer) {
-		if (!inet->dport)
-			return -ENOTCONN;
-		if (((1 << sk->state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
-		    peer == 1)
+		if (!inet->dport ||
+		    (((1 << sk->state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
+		     peer == 1))
 			return -ENOTCONN;
 		sin->sin_port = inet->dport;
 		sin->sin_addr.s_addr = inet->daddr;
...
@@ -760,7 +752,7 @@ static int inet_getname(struct socket *sock, struct sockaddr *uaddr,
 		sin->sin_addr.s_addr = addr;
 	}
 	*uaddr_len = sizeof(*sin);
-	return(0);
+	return 0;
 }
...
@@ -770,10 +762,8 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, int size,
 {
 	struct sock *sk = sock->sk;
 	int addr_len = 0;
-	int err;
-
-	err = sk->prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
-				flags & ~MSG_DONTWAIT, &addr_len);
+	int err = sk->prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
+				    flags & ~MSG_DONTWAIT, &addr_len);
 	if (err >= 0)
 		msg->msg_namelen = addr_len;
 	return err;
...
@@ -803,12 +793,13 @@ int inet_shutdown(struct socket *sock, int how)
 	how++; /* maps 0->1 has the advantage of making bit 1 rcvs and
 		  1->2 bit 2 snds.
 		  2->3 */
-	if ((how & ~SHUTDOWN_MASK) || how == 0)	/* MAXINT->0 */
+	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
 		return -EINVAL;

 	lock_sock(sk);
 	if (sock->state == SS_CONNECTING) {
 		if ((1 << sk->state) &
 		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
 			sock->state = SS_DISCONNECTING;
 		else
 			sock->state = SS_CONNECTED;
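Several of the functions above test whether the TCP state is in a set by shifting 1 by the state number and masking against a group of TCPF_* flags. A tiny stand-alone illustration of that trick, with made-up state names:

#include <stdio.h>

/* Made-up connection states and their bitmask forms. */
enum conn_state { CS_IDLE, CS_SYN_SENT, CS_SYN_RECV, CS_ESTABLISHED };
#define CSF_SYN_SENT	(1 << CS_SYN_SENT)
#define CSF_SYN_RECV	(1 << CS_SYN_RECV)

/* "Is the state one of a set?" becomes a single AND instead of a chain of
 * comparisons, which is what (1 << sk->state) & (TCPF_...) does above. */
static int is_connecting(enum conn_state s)
{
	return ((1 << s) & (CSF_SYN_SENT | CSF_SYN_RECV)) != 0;
}

int main(void)
{
	printf("%d %d\n", is_connecting(CS_SYN_SENT),
	       is_connecting(CS_ESTABLISHED));
	return 0;
}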
...
@@ -858,38 +849,42 @@ int inet_shutdown(struct socket *sock, int how)
 static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
 	struct sock *sk = sock->sk;
-	int err;
+	int err = 0;
 	int pid;

 	switch (cmd) {
 	case FIOSETOWN:
 	case SIOCSPGRP:
-		err = get_user(pid, (int *)arg);
-		if (err)
-			return err;
-		if (current->pid != pid &&
-		    current->pgrp != -pid &&
-		    !capable(CAP_NET_ADMIN))
-			return -EPERM;
-		sk->proc = pid;
-		return(0);
+		if (get_user(pid, (int *)arg))
+			err = -EFAULT;
+		else if (current->pid != pid &&
+			 current->pgrp != -pid &&
+			 !capable(CAP_NET_ADMIN))
+			err = -EPERM;
+		else
+			sk->proc = pid;
+		break;
 	case FIOGETOWN:
 	case SIOCGPGRP:
-		return put_user(sk->proc, (int *)arg);
+		err = put_user(sk->proc, (int *)arg);
+		break;
 	case SIOCGSTAMP:
-		if (sk->stamp.tv_sec == 0)
-			return -ENOENT;
-		err = copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval));
-		if (err)
-			err = -EFAULT;
-		return err;
+		if (!sk->stamp.tv_sec)
+			err = -ENOENT;
+		else if (copy_to_user((void *)arg, &sk->stamp,
+				      sizeof(struct timeval)))
+			err = -EFAULT;
+		break;
 	case SIOCADDRT:
 	case SIOCDELRT:
 	case SIOCRTMSG:
-		return(ip_rt_ioctl(cmd, (void *)arg));
+		err = ip_rt_ioctl(cmd, (void *)arg);
+		break;
 	case SIOCDARP:
 	case SIOCGARP:
 	case SIOCSARP:
-		return(arp_ioctl(cmd, (void *)arg));
+		err = arp_ioctl(cmd, (void *)arg);
+		break;
 	case SIOCGIFADDR:
 	case SIOCSIFADDR:
 	case SIOCGIFBRDADDR:
...
@@ -901,80 +896,79 @@ static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	case SIOCSIFPFLAGS:
 	case SIOCGIFPFLAGS:
 	case SIOCSIFFLAGS:
-		return(devinet_ioctl(cmd, (void *)arg));
+		err = devinet_ioctl(cmd, (void *)arg);
+		break;
 	case SIOCGIFBR:
 	case SIOCSIFBR:
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 #ifdef CONFIG_KMOD
-		if (br_ioctl_hook == NULL)
+		if (!br_ioctl_hook)
 			request_module("bridge");
 #endif
-		if (br_ioctl_hook != NULL)
-			return br_ioctl_hook(arg);
+		if (br_ioctl_hook)
+			err = br_ioctl_hook(arg);
+		else
 #endif
-		return -ENOPKG;
+		err = -ENOPKG;
+		break;
 	case SIOCGIFVLAN:
 	case SIOCSIFVLAN:
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 #ifdef CONFIG_KMOD
-		if (vlan_ioctl_hook == NULL)
+		if (!vlan_ioctl_hook)
 			request_module("8021q");
 #endif
-		if (vlan_ioctl_hook != NULL)
-			return vlan_ioctl_hook(arg);
+		if (vlan_ioctl_hook)
+			err = vlan_ioctl_hook(arg);
+		else
 #endif
-		return -ENOPKG;
+		err = -ENOPKG;
+		break;
 	case SIOCGIFDIVERT:
 	case SIOCSIFDIVERT:
 #ifdef CONFIG_NET_DIVERT
-		return divert_ioctl(cmd, (struct divert_cf *)arg);
+		err = divert_ioctl(cmd, (struct divert_cf *)arg);
 #else
-		return -ENOPKG;
+		err = -ENOPKG;
 #endif	/* CONFIG_NET_DIVERT */
+		break;
 	case SIOCADDDLCI:
 	case SIOCDELDLCI:
 #ifdef CONFIG_DLCI
 		lock_kernel();
 		err = dlci_ioctl(cmd, (void *)arg);
 		unlock_kernel();
-		return err;
-#endif
-#ifdef CONFIG_DLCI_MODULE
+		break;
+#elif CONFIG_DLCI_MODULE
 #ifdef CONFIG_KMOD
-		if (dlci_ioctl_hook == NULL)
+		if (!dlci_ioctl_hook)
 			request_module("dlci");
 #endif
 		if (dlci_ioctl_hook) {
 			lock_kernel();
 			err = (*dlci_ioctl_hook)(cmd, (void *)arg);
 			unlock_kernel();
-			return err;
-		} else
+		}
 #endif
-			return -ENOPKG;
+		err = -ENOPKG;
+		break;
 	default:
-		if ((cmd >= SIOCDEVPRIVATE) &&
-		    (cmd <= (SIOCDEVPRIVATE + 15)))
-			return(dev_ioctl(cmd, (void *)arg));
+		if (cmd >= SIOCDEVPRIVATE &&
+		    cmd <= (SIOCDEVPRIVATE + 15))
+			err = dev_ioctl(cmd, (void *)arg);
+		else
 #ifdef WIRELESS_EXT
-		if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST))
-			return(dev_ioctl(cmd, (void *)arg));
+		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
+			err = dev_ioctl(cmd, (void *)arg);
+		else
 #endif	/* WIRELESS_EXT */
-		if (sk->prot->ioctl == NULL ||
-		    (err = sk->prot->ioctl(sk, cmd, arg)) == -ENOIOCTLCMD)
-			return(dev_ioctl(cmd, (void *)arg));
-		return err;
+		if (!sk->prot->ioctl ||
+		    (err = sk->prot->ioctl(sk, cmd, arg)) ==
+							-ENOIOCTLCMD)
+			err = dev_ioctl(cmd, (void *)arg);
+		break;
 	}
-	/*NOTREACHED*/
-	return(0);
+	return err;
 }

 struct proto_ops inet_stream_ops = {
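The bridge, VLAN and DLCI cases above share one shape: an optional function-pointer hook that a module fills in, with a one-shot module-load attempt when it is still NULL and -ENOPKG otherwise. A stripped-down user-space sketch of that "optional hook" idea; the names are invented and the load step is a no-op stand-in, not the kernel's request_module():

#include <errno.h>
#include <stdio.h>

/* Optional handler a "module" may install; NULL means not loaded. */
static int (*frob_hook)(unsigned long arg);

static void try_load_module(void)
{
	/* A loaded module would set frob_hook here; left empty in this
	 * sketch. */
}

static int do_frob_ioctl(unsigned long arg)
{
	if (!frob_hook)
		try_load_module();	/* one attempt to populate the hook */
	if (frob_hook)
		return frob_hook(arg);	/* dispatch if someone registered */
	return -ENOPKG;			/* Linux errno: package not installed */
}

int main(void)
{
	printf("%d\n", do_frob_ioctl(0));
	return 0;
}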
...
@@ -1067,8 +1061,7 @@ static struct inet_protosw inetsw_array[] =
 #define INETSW_ARRAY_LEN (sizeof(inetsw_array) / sizeof(struct inet_protosw))

 void
 inet_register_protosw(struct inet_protosw *p)
 {
 	struct list_head *lh;
 	struct inet_protosw *answer;
...
@@ -1115,8 +1108,7 @@ inet_register_protosw(struct inet_protosw *p)
 	goto out;
 }

 void
 inet_unregister_protosw(struct inet_protosw *p)
 {
 	if (INET_PROTOSW_PERMANENT & p->flags) {
 		printk(KERN_ERR
...
@@ -1164,25 +1156,25 @@ static int __init inet_init(void)
 	 *	Tell SOCKET that we are alive...
 	 */
 	(void)sock_register(&inet_family_ops);

 	/*
 	 *	Add all the protocols.
 	 */
 	printk(KERN_INFO "IP Protocols: ");
-	for (p = inet_protocol_base; p != NULL;) {
+	for (p = inet_protocol_base; p;) {
 		struct inet_protocol *tmp = (struct inet_protocol *)p->next;
 		inet_add_protocol(p);
 		printk("%s%s", p->name, tmp ? ", " : "\n");
 		p = tmp;
 	}

 	/* Register the socket-side information for inet_create. */
 	for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
 		INIT_LIST_HEAD(r);

 	for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
 		inet_register_protosw(q);

 	/*
...
net/ipv4/devinet.c
View file @
868f24fc
...
@@ -18,7 +18,8 @@
  *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  *
  *	Changes:
- *	Alexey Kuznetsov:	pa_* fields are replaced with ifaddr lists.
+ *	Alexey Kuznetsov:	pa_* fields are replaced with ifaddr
+ *				lists.
  *	Cyrus Durgin:		updated for kmod
  *	Matthias Andree:	in devinet_ioctl, compare label and
  *				address (4.4BSD alias style support),
...
@@ -60,15 +61,29 @@
 #include <net/route.h>
 #include <net/ip_fib.h>

-struct ipv4_devconf ipv4_devconf = { 1, 1, 1, 1, 0, };
+struct ipv4_devconf ipv4_devconf = {
+	accept_redirects:	1,
+	send_redirects:		1,
+	secure_redirects:	1,
+	shared_media:		1,
+};

-static struct ipv4_devconf ipv4_devconf_dflt = { 1, 1, 1, 1, 1, };
+static struct ipv4_devconf ipv4_devconf_dflt = {
+	accept_redirects:	1,
+	send_redirects:		1,
+	secure_redirects:	1,
+	shared_media:		1,
+	accept_source_route:	1,
+};

 static void rtmsg_ifa(int event, struct in_ifaddr *);

 static struct notifier_block *inetaddr_chain;
 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 			 int destroy);
 #ifdef CONFIG_SYSCTL
 static void devinet_sysctl_register(struct in_device *in_dev,
 				    struct ipv4_devconf *p);
 static void devinet_sysctl_unregister(struct ipv4_devconf *p);
 #endif
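The positional { 1, 1, 1, 1, 0, } initializers are replaced with labeled fields, which keeps the tables readable even if the struct layout changes. The kernel of this era uses the GNU "field:" spelling; standard C99 writes the same thing as ".field =". A small self-contained sketch with a made-up struct, not ipv4_devconf:

#include <stdio.h>

struct feature_conf {
	int accept_redirects;
	int send_redirects;
	int secure_redirects;
	int shared_media;
	int accept_source_route;
};

/* C99 designated initializers: unnamed members default to zero, and the
 * order no longer has to match the struct declaration. */
static struct feature_conf conf_default = {
	.accept_redirects	= 1,
	.send_redirects		= 1,
	.secure_redirects	= 1,
	.shared_media		= 1,
	/* .accept_source_route is implicitly 0 */
};

int main(void)
{
	printf("%d %d\n", conf_default.shared_media,
	       conf_default.accept_source_route);
	return 0;
}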
...
@@ -79,12 +94,10 @@ int inet_dev_count;
 rwlock_t inetdev_lock = RW_LOCK_UNLOCKED;

 static struct in_ifaddr *inet_alloc_ifa(void)
 {
-	struct in_ifaddr *ifa;
+	struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL);

-	ifa = kmalloc(sizeof(*ifa), GFP_KERNEL);
 	if (ifa) {
 		memset(ifa, 0, sizeof(*ifa));
 		inet_ifa_count++;
...
@@ -105,18 +118,19 @@ void in_dev_finish_destroy(struct in_device *idev)
 {
 	struct net_device *dev = idev->dev;

-	BUG_TRAP(idev->ifa_list == NULL);
-	BUG_TRAP(idev->mc_list == NULL);
+	BUG_TRAP(!idev->ifa_list);
+	BUG_TRAP(!idev->mc_list);
 #ifdef NET_REFCNT_DEBUG
 	printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
 	       idev, dev ? dev->name : "NIL");
 #endif
 	dev_put(dev);
-	if (!idev->dead) {
+	if (!idev->dead)
 		printk("Freeing alive in_device %p\n", idev);
-		return;
+	else {
+		inet_dev_count--;
+		kfree(idev);
 	}
-	inet_dev_count--;
-	kfree(idev);
 }

 struct in_device *inetdev_init(struct net_device *dev)
...
@@ -127,21 +141,20 @@ struct in_device *inetdev_init(struct net_device *dev)
 	in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL);
 	if (!in_dev)
-		return NULL;
+		goto out;
 	memset(in_dev, 0, sizeof(*in_dev));
 	in_dev->lock = RW_LOCK_UNLOCKED;
 	memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf));
 	in_dev->cnf.sysctl = NULL;
 	in_dev->dev = dev;
-	if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) {
-		kfree(in_dev);
-		return NULL;
-	}
+	if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
+		goto out_kfree;
 	inet_dev_count++;
 	/* Reference in_dev->dev */
 	dev_hold(dev);
 #ifdef CONFIG_SYSCTL
 	neigh_sysctl_register(dev, in_dev->arp_parms, NET_IPV4,
 			      NET_IPV4_NEIGH, "ipv4");
 #endif
 	write_lock_bh(&inetdev_lock);
 	dev->ip_ptr = in_dev;
...
@@ -151,9 +164,14 @@ struct in_device *inetdev_init(struct net_device *dev)
 #ifdef CONFIG_SYSCTL
 	devinet_sysctl_register(in_dev, &in_dev->cnf);
 #endif
 	if (dev->flags & IFF_UP)
 		ip_mc_up(in_dev);
+out:
 	return in_dev;
+out_kfree:
+	kfree(in_dev);
+	in_dev = NULL;
+	goto out;
 }

 static void inetdev_destroy(struct in_device *in_dev)
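inetdev_init() above now funnels its failure path through out_kfree, setting the pointer back to NULL so the single "return in_dev" covers both outcomes. A compact user-space sketch of the same pointer-returning shape, with invented names:

#include <stdlib.h>
#include <string.h>

struct widget {
	char *buf;
	size_t len;
};

/* Returns a fully initialised widget or NULL.  One return statement;
 * the failure path jumps back to it after undoing the allocation. */
static struct widget *widget_new(size_t len)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		goto out;
	memset(w, 0, sizeof(*w));	/* start from a known-zero state */
	w->buf = malloc(len);
	if (!w->buf)
		goto out_free;
	w->len = len;
out:
	return w;
out_free:
	free(w);
	w = NULL;
	goto out;
}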
...
@@ -199,8 +217,8 @@ int inet_addr_onlink(struct in_device *in_dev, u32 a, u32 b)
 	return 0;
 }

 static void
 inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy)
 {
 	struct in_ifaddr *ifa1 = *ifap;
...
@@ -208,12 +226,12 @@ inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy)
 	/* 1. Deleting primary ifaddr forces deletion all secondaries */
 	if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
 		struct in_ifaddr *ifa;
 		struct in_ifaddr **ifap1 = &ifa1->ifa_next;

 		while ((ifa = *ifap1) != NULL) {
 			if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
 			    ifa1->ifa_mask != ifa->ifa_mask ||
 			    !inet_ifa_match(ifa1->ifa_address, ifa)) {
 				ifap1 = &ifa->ifa_next;
...
@@ -250,20 +268,19 @@ inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy)
 	if (destroy) {
 		inet_free_ifa(ifa1);
-		if (in_dev->ifa_list == NULL)
+		if (!in_dev->ifa_list)
 			inetdev_destroy(in_dev);
 	}
 }

 static int
 inet_insert_ifa(struct in_ifaddr *ifa)
 {
 	struct in_device *in_dev = ifa->ifa_dev;
 	struct in_ifaddr *ifa1, **ifap, **last_primary;

 	ASSERT_RTNL();

-	if (ifa->ifa_local == 0) {
+	if (!ifa->ifa_local) {
 		inet_free_ifa(ifa);
 		return 0;
 	}
...
@@ -271,10 +288,13 @@ inet_insert_ifa(struct in_ifaddr *ifa)
 	ifa->ifa_flags &= ~IFA_F_SECONDARY;
 	last_primary = &in_dev->ifa_list;

-	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL; ifap = &ifa1->ifa_next) {
-		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) && ifa->ifa_scope <= ifa1->ifa_scope)
+	for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
+	     ifap = &ifa1->ifa_next) {
+		if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
+		    ifa->ifa_scope <= ifa1->ifa_scope)
 			last_primary = &ifa1->ifa_next;
 		if (ifa1->ifa_mask == ifa->ifa_mask &&
 		    inet_ifa_match(ifa1->ifa_address, ifa)) {
 			if (ifa1->ifa_local == ifa->ifa_local) {
 				inet_free_ifa(ifa);
 				return -EEXIST;
...
@@ -287,7 +307,7 @@ inet_insert_ifa(struct in_ifaddr *ifa)
 		}
 	}

 	if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
 		net_srandom(ifa->ifa_local);
 		ifap = last_primary;
 	}
...
@@ -306,24 +326,23 @@ inet_insert_ifa(struct in_ifaddr *ifa)
 	return 0;
 }

 static int
 inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
 {
 	struct in_device *in_dev = __in_dev_get(dev);

 	ASSERT_RTNL();

-	if (in_dev == NULL) {
+	if (!in_dev) {
 		in_dev = inetdev_init(dev);
-		if (in_dev == NULL) {
+		if (!in_dev) {
 			inet_free_ifa(ifa);
 			return -ENOBUFS;
 		}
 	}
 	if (ifa->ifa_dev != in_dev) {
-		BUG_TRAP(ifa->ifa_dev == NULL);
+		BUG_TRAP(!ifa->ifa_dev);
 		in_dev_hold(in_dev);
 		ifa->ifa_dev = in_dev;
 	}
 	if (LOOPBACK(ifa->ifa_local))
 		ifa->ifa_scope = RT_SCOPE_HOST;
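Both inet_del_ifa() and inet_insert_ifa() walk the address list through a "struct in_ifaddr **ifap" cursor, which lets them unlink or insert without special-casing the list head. A minimal generic sketch of that pointer-to-pointer idiom, with a toy node type:

#include <stdio.h>

struct node {
	int key;
	struct node *next;
};

/* Insert in ascending key order.  "pp" always points at the pointer that
 * will have to change (either the head or some node's ->next), so no
 * separate "previous node" bookkeeping is needed. */
static void insert_sorted(struct node **head, struct node *n)
{
	struct node **pp, *cur;

	for (pp = head; (cur = *pp) != NULL; pp = &cur->next)
		if (n->key < cur->key)
			break;
	n->next = *pp;
	*pp = n;
}

int main(void)
{
	struct node a = { 2, NULL }, b = { 1, NULL }, *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	printf("%d %d\n", head->key, head->next->key);	/* prints: 1 2 */
	return 0;
}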
...
@@ -344,7 +363,8 @@ struct in_device *inetdev_by_index(int ifindex)
 /* Called only from RTNL semaphored context. No locks. */
 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, u32 prefix,
 				    u32 mask)
 {
 	ASSERT_RTNL();
...
@@ -355,8 +375,7 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, u32 prefix, u32 ma
 	return NULL;
 }

 int
 inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct rtattr **rta = arg;
 	struct in_device *in_dev;
...
@@ -366,69 +385,79 @@ inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	ASSERT_RTNL();

 	if ((in_dev = inetdev_by_index(ifm->ifa_index)) == NULL)
-		return -EADDRNOTAVAIL;
+		goto out;
 	__in_dev_put(in_dev);

-	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next) {
-		if ((rta[IFA_LOCAL - 1] && memcmp(RTA_DATA(rta[IFA_LOCAL - 1]), &ifa->ifa_local, 4)) ||
-		    (rta[IFA_LABEL - 1] && strcmp(RTA_DATA(rta[IFA_LABEL - 1]), ifa->ifa_label)) ||
+	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+	     ifap = &ifa->ifa_next) {
+		if ((rta[IFA_LOCAL - 1] &&
+		     memcmp(RTA_DATA(rta[IFA_LOCAL - 1]),
+			    &ifa->ifa_local, 4)) ||
+		    (rta[IFA_LABEL - 1] &&
+		     strcmp(RTA_DATA(rta[IFA_LABEL - 1]), ifa->ifa_label)) ||
 		    (rta[IFA_ADDRESS - 1] &&
 		     (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
-		      !inet_ifa_match(*(u32 *)RTA_DATA(rta[IFA_ADDRESS - 1]), ifa))))
+		      !inet_ifa_match(*(u32 *)RTA_DATA(rta[IFA_ADDRESS - 1]),
+				      ifa))))
 			continue;
 		inet_del_ifa(in_dev, ifap, 1);
 		return 0;
 	}
+out:
 	return -EADDRNOTAVAIL;
 }

 int
 inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct rtattr **rta = arg;
 	struct net_device *dev;
 	struct in_device *in_dev;
 	struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
 	struct in_ifaddr *ifa;
+	int rc = -EINVAL;

 	ASSERT_RTNL();

-	if (ifm->ifa_prefixlen > 32 || rta[IFA_LOCAL - 1] == NULL)
-		return -EINVAL;
+	if (ifm->ifa_prefixlen > 32 || !rta[IFA_LOCAL - 1])
+		goto out;

+	rc = -ENODEV;
 	if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL)
-		return -ENODEV;
+		goto out;

+	rc = -ENOBUFS;
 	if ((in_dev = __in_dev_get(dev)) == NULL) {
 		in_dev = inetdev_init(dev);
 		if (!in_dev)
-			return -ENOBUFS;
+			goto out;
 	}

 	if ((ifa = inet_alloc_ifa()) == NULL)
-		return -ENOBUFS;
+		goto out;

-	if (rta[IFA_ADDRESS - 1] == NULL)
+	if (!rta[IFA_ADDRESS - 1])
 		rta[IFA_ADDRESS - 1] = rta[IFA_LOCAL - 1];
 	memcpy(&ifa->ifa_local, RTA_DATA(rta[IFA_LOCAL - 1]), 4);
 	memcpy(&ifa->ifa_address, RTA_DATA(rta[IFA_ADDRESS - 1]), 4);
 	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
 	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
 	if (rta[IFA_BROADCAST - 1])
-		memcpy(&ifa->ifa_broadcast, RTA_DATA(rta[IFA_BROADCAST - 1]), 4);
+		memcpy(&ifa->ifa_broadcast,
+		       RTA_DATA(rta[IFA_BROADCAST - 1]), 4);
 	if (rta[IFA_ANYCAST - 1])
 		memcpy(&ifa->ifa_anycast, RTA_DATA(rta[IFA_ANYCAST - 1]), 4);
 	ifa->ifa_flags = ifm->ifa_flags;
 	ifa->ifa_scope = ifm->ifa_scope;
 	in_dev_hold(in_dev);
 	ifa->ifa_dev = in_dev;
 	if (rta[IFA_LABEL - 1])
 		memcpy(ifa->ifa_label, RTA_DATA(rta[IFA_LABEL - 1]), IFNAMSIZ);
 	else
 		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);

-	return inet_insert_ifa(ifa);
+	rc = inet_insert_ifa(ifa);
+out:
+	return rc;
 }

 /*
...
@@ -437,22 +466,22 @@ inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 static __inline__ int inet_abc_len(u32 addr)
 {
-	if (ZERONET(addr))
-		return 0;
-
-	addr = ntohl(addr);
-	if (IN_CLASSA(addr))
-		return 8;
-	if (IN_CLASSB(addr))
-		return 16;
-	if (IN_CLASSC(addr))
-		return 24;
-	/*
-	 *	Something else, probably a multicast.
-	 */
-	return -1;
+	int rc = -1;	/* Something else, probably a multicast. */
+
+	if (ZERONET(addr))
+		rc = 0;
+	else {
+		addr = ntohl(addr);
+		if (IN_CLASSA(addr))
+			rc = 8;
+		else if (IN_CLASSB(addr))
+			rc = 16;
+		else if (IN_CLASSC(addr))
+			rc = 24;
+	}
+	return rc;
 }
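inet_abc_len() derives the legacy classful prefix length from the leading bits of the address. A stand-alone version of the same computation, written without the kernel's IN_CLASS* macros and assuming the address is already in host byte order:

#include <stdio.h>
#include <stdint.h>

/* Classful default prefix length: class A -> 8, B -> 16, C -> 24,
 * 0 for the "zero network", -1 for anything else (class D/E). */
static int classful_prefix_len(uint32_t addr)
{
	if ((addr & 0xff000000u) == 0)			/* 0.x.y.z */
		return 0;
	if ((addr & 0x80000000u) == 0)			/* 0xxxxxxx: class A */
		return 8;
	if ((addr & 0xc0000000u) == 0x80000000u)	/* 10xxxxxx: class B */
		return 16;
	if ((addr & 0xe0000000u) == 0xc0000000u)	/* 110xxxxx: class C */
		return 24;
	return -1;					/* multicast/reserved */
}

int main(void)
{
	printf("%d %d %d\n",
	       classful_prefix_len(0x0a000001u),	/* 10.0.0.1    -> 8  */
	       classful_prefix_len(0xac100001u),	/* 172.16.0.1  -> 16 */
	       classful_prefix_len(0xc0a80001u));	/* 192.168.0.1 -> 24 */
	return 0;
}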
...
@@ -466,7 +495,7 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 	struct in_ifaddr *ifa = NULL;
 	struct net_device *dev;
 	char *colon;
-	int ret = 0;
+	int ret = -EFAULT;
 	int tryaddrmatch = 0;

 	/*
...
@@ -474,8 +503,8 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 	 */
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-		return -EFAULT;
+		goto out;
 	ifr.ifr_name[IFNAMSIZ - 1] = 0;

 	/* save original address for comparison */
 	memcpy(&sin_orig, sin, sizeof(*sin));
...
@@ -503,43 +532,48 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 		break;

 	case SIOCSIFFLAGS:
+		ret = -EACCES;
 		if (!capable(CAP_NET_ADMIN))
-			return -EACCES;
+			goto out;
 		break;
 	case SIOCSIFADDR:	/* Set interface address (and family) */
 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
 	case SIOCSIFDSTADDR:	/* Set the destination address */
 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
+		ret = -EACCES;
 		if (!capable(CAP_NET_ADMIN))
-			return -EACCES;
+			goto out;
+		ret = -EINVAL;
 		if (sin->sin_family != AF_INET)
-			return -EINVAL;
+			goto out;
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}

 	dev_probe_lock();
 	rtnl_lock();

-	if ((dev = __dev_get_by_name(ifr.ifr_name)) == NULL) {
-		ret = -ENODEV;
+	ret = -ENODEV;
+	if ((dev = __dev_get_by_name(ifr.ifr_name)) == NULL)
 		goto done;
-	}

 	if (colon)
 		*colon = ':';

 	if ((in_dev = __in_dev_get(dev)) != NULL) {
 		if (tryaddrmatch) {
 			/* Matthias Andree */
 			/* compare label and address (4.4BSD style) */
 			/* note: we only do this for a limited set of ioctls
 			   and only if the original address family was AF_INET.
 			   This is checked above. */
-			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next) {
-				if ((strcmp(ifr.ifr_name, ifa->ifa_label) == 0)
-				    && (sin_orig.sin_addr.s_addr == ifa->ifa_address)) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+			     ifap = &ifa->ifa_next) {
+				if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
+				    sin_orig.sin_addr.s_addr ==
+							ifa->ifa_address) {
 					break; /* found */
 				}
 			}
...
@@ -547,17 +581,17 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 		/* we didn't get a match, maybe the application is
 		   4.3BSD-style and passed in junk so we fall back to
 		   comparing just the label */
-		if (ifa == NULL) {
-			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next)
-				if (strcmp(ifr.ifr_name, ifa->ifa_label) == 0)
+		if (!ifa) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+			     ifap = &ifa->ifa_next)
+				if (!strcmp(ifr.ifr_name, ifa->ifa_label))
 					break;
 		}
 	}

-	if (ifa == NULL && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS) {
-		ret = -EADDRNOTAVAIL;
+	ret = -EADDRNOTAVAIL;
+	if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
 		goto done;
-	}

 	switch (cmd) {
 	case SIOCGIFADDR:	/* Get interface address */
...
@@ -578,11 +612,11 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 	case SIOCSIFFLAGS:
 		if (colon) {
-			if (ifa == NULL) {
-				ret = -EADDRNOTAVAIL;
+			ret = -EADDRNOTAVAIL;
+			if (!ifa)
 				break;
-			}
+			ret = 0;
 			if (!(ifr.ifr_flags & IFF_UP))
 				inet_del_ifa(in_dev, ifap, 1);
 			break;
 		}
...
@@ -590,16 +624,14 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 		break;

 	case SIOCSIFADDR:	/* Set interface address (and family) */
-		if (inet_abc_len(sin->sin_addr.s_addr) < 0) {
-			ret = -EINVAL;
+		ret = -EINVAL;
+		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
 			break;
-		}

 		if (!ifa) {
-			if ((ifa = inet_alloc_ifa()) == NULL) {
-				ret = -ENOBUFS;
+			ret = -ENOBUFS;
+			if ((ifa = inet_alloc_ifa()) == NULL)
 				break;
-			}
 			if (colon)
 				memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
 			else
...
@@ -613,14 +645,15 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 			ifa->ifa_anycast = 0;
 		}

-		ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
+		ifa->ifa_address =
+			ifa->ifa_local = sin->sin_addr.s_addr;

 		if (!(dev->flags & IFF_POINTOPOINT)) {
 			ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
 			ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
-			if ((dev->flags & IFF_BROADCAST) && ifa->ifa_prefixlen < 31)
-				ifa->ifa_broadcast = ifa->ifa_address |~ ifa->ifa_mask;
+			if ((dev->flags & IFF_BROADCAST) &&
+			    ifa->ifa_prefixlen < 31)
+				ifa->ifa_broadcast = ifa->ifa_address |
+						     ~ifa->ifa_mask;
 		} else {
 			ifa->ifa_prefixlen = 32;
 			ifa->ifa_mask = inet_make_mask(32);
...
@@ -629,6 +662,7 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 		break;

 	case SIOCSIFBRDADDR:	/* Set the broadcast address */
+		ret = 0;
 		if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
 			inet_del_ifa(in_dev, ifap, 0);
 			ifa->ifa_broadcast = sin->sin_addr.s_addr;
...
@@ -637,15 +671,16 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 		break;

 	case SIOCSIFDSTADDR:	/* Set the destination address */
-		if (ifa->ifa_address != sin->sin_addr.s_addr) {
-			if (inet_abc_len(sin->sin_addr.s_addr) < 0) {
-				ret = -EINVAL;
-				break;
-			}
-			inet_del_ifa(in_dev, ifap, 0);
-			ifa->ifa_address = sin->sin_addr.s_addr;
-			inet_insert_ifa(ifa);
-		}
+		ret = 0;
+		if (ifa->ifa_address == sin->sin_addr.s_addr)
+			break;
+		ret = -EINVAL;
+		if (inet_abc_len(sin->sin_addr.s_addr) < 0)
+			break;
+		ret = 0;
+		inet_del_ifa(in_dev, ifap, 0);
+		ifa->ifa_address = sin->sin_addr.s_addr;
+		inet_insert_ifa(ifa);
 		break;

 	case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
...
@@ -653,11 +688,10 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 		/*
 		 *	The mask we set must be legal.
 		 */
-		if (bad_mask(sin->sin_addr.s_addr, 0)) {
-			ret = -EINVAL;
+		ret = -EINVAL;
+		if (bad_mask(sin->sin_addr.s_addr, 0))
 			break;
-		}
+		ret = 0;
 		if (ifa->ifa_mask != sin->sin_addr.s_addr) {
 			inet_del_ifa(in_dev, ifap, 0);
 			ifa->ifa_mask = sin->sin_addr.s_addr;
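The SIOCSIFADDR branch derives the netmask from the classful prefix length and the broadcast address as "address | ~mask". A small illustration of those two computations on host-order values (this is not the kernel's inet_make_mask(), which operates on network-order words):

#include <stdio.h>
#include <stdint.h>

/* Build a netmask from a prefix length. */
static uint32_t make_mask(int prefixlen)
{
	return prefixlen ? ~0u << (32 - prefixlen) : 0;
}

int main(void)
{
	uint32_t addr  = 0xc0a80a05u;		/* 192.168.10.5   */
	uint32_t mask  = make_mask(24);		/* 255.255.255.0  */
	uint32_t bcast = addr | ~mask;		/* 192.168.10.255 */

	printf("mask=%08x bcast=%08x\n", mask, bcast);
	return 0;
}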
...
@@ -669,49 +703,51 @@ int devinet_ioctl(unsigned int cmd, void *arg)
 done:
 	rtnl_unlock();
 	dev_probe_unlock();
+out:
 	return ret;
 rarok:
 	rtnl_unlock();
 	dev_probe_unlock();
-	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-		return -EFAULT;
-	return 0;
+	ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
+	goto out;
 }

 static int inet_gifconf(struct net_device *dev, char *buf, int len)
 {
 	struct in_device *in_dev = __in_dev_get(dev);
 	struct in_ifaddr *ifa;
 	struct ifreq ifr;
 	int done = 0;

-	if (in_dev == NULL || (ifa = in_dev->ifa_list) == NULL)
-		return 0;
+	if (!in_dev || (ifa = in_dev->ifa_list) == NULL)
+		goto out;

-	for ( ; ifa; ifa = ifa->ifa_next) {
+	for (; ifa; ifa = ifa->ifa_next) {
 		if (!buf) {
 			done += sizeof(ifr);
 			continue;
 		}
 		if (len < (int)sizeof(ifr))
-			return done;
+			break;
 		memset(&ifr, 0, sizeof(struct ifreq));
 		if (ifa->ifa_label)
 			strcpy(ifr.ifr_name, ifa->ifa_label);
 		else
 			strcpy(ifr.ifr_name, dev->name);

 		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
 		(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
 							ifa->ifa_local;

-		if (copy_to_user(buf, &ifr, sizeof(struct ifreq)))
-			return -EFAULT;
+		if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
+			done = -EFAULT;
+			break;
+		}
 		buf  += sizeof(struct ifreq);
 		len  -= sizeof(struct ifreq);
 		done += sizeof(struct ifreq);
 	}
+out:
 	return done;
 }
...
@@ -722,10 +758,8 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
 	read_lock(&inetdev_lock);
 	in_dev = __in_dev_get(dev);
-	if (in_dev == NULL) {
-		read_unlock(&inetdev_lock);
-		return 0;
-	}
+	if (!in_dev)
+		goto out_unlock_inetdev;

 	read_lock(&in_dev->lock);
 	for_primary_ifa(in_dev) {
...
@@ -742,7 +776,7 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
 	read_unlock(&inetdev_lock);

 	if (addr)
-		return addr;
+		goto out;

 	/* Not loopback addresses on loopback should be preferred
 	   in this case. It is importnat that lo is the first interface
...
@@ -750,8 +784,8 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
 	 */
 	read_lock(&dev_base_lock);
 	read_lock(&inetdev_lock);
 	for (dev = dev_base; dev; dev = dev->next) {
 		if ((in_dev = __in_dev_get(dev)) == NULL)
 			continue;

 		read_lock(&in_dev->lock);
...
@@ -759,17 +793,20 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
 			if (ifa->ifa_scope != RT_SCOPE_LINK &&
 			    ifa->ifa_scope <= scope) {
 				read_unlock(&in_dev->lock);
-				read_unlock(&inetdev_lock);
-				read_unlock(&dev_base_lock);
-				return ifa->ifa_local;
+				addr = ifa->ifa_local;
+				goto out_unlock_both;
 			}
 		} endfor_ifa(in_dev);
 		read_unlock(&in_dev->lock);
 	}
+out_unlock_both:
 	read_unlock(&inetdev_lock);
 	read_unlock(&dev_base_lock);
-	return 0;
+out:
+	return addr;
+out_unlock_inetdev:
+	read_unlock(&inetdev_lock);
+	goto out;
 }

 /*
...
@@ -783,20 +820,21 @@ int register_inetaddr_notifier(struct notifier_block *nb)
 int unregister_inetaddr_notifier(struct notifier_block *nb)
 {
 	return notifier_chain_unregister(&inetaddr_chain, nb);
 }

 /* Called only under RTNL semaphore */
 static int inetdev_event(struct notifier_block *this, unsigned long event,
 			 void *ptr)
 {
 	struct net_device *dev = ptr;
 	struct in_device *in_dev = __in_dev_get(dev);

 	ASSERT_RTNL();

-	if (in_dev == NULL)
-		return NOTIFY_DONE;
+	if (!in_dev)
+		goto out;

 	switch (event) {
 	case NETDEV_REGISTER:
...
@@ -843,7 +881,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void
 		}
 		break;
 	}
+out:
 	return NOTIFY_DONE;
 }
...
@@ -887,15 +925,14 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int idx, ip_idx;
+	int s_idx, s_ip_idx;
 	struct net_device *dev;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
-	int s_ip_idx, s_idx = cb->args[0];

+	s_idx = cb->args[0];
 	s_ip_idx = ip_idx = cb->args[1];
 	read_lock(&dev_base_lock);
 	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
 		if (idx < s_idx)
 			continue;
 		if (idx > s_idx)
...
@@ -911,7 +948,8 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 			if (ip_idx < s_ip_idx)
 				continue;
 			if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
-					     cb->nlh->nlmsg_seq, RTM_NEWADDR) <= 0) {
+					     cb->nlh->nlmsg_seq,
+					     RTM_NEWADDR) <= 0) {
 				read_unlock(&in_dev->lock);
 				read_unlock(&inetdev_lock);
 				goto done;
...
@@ -929,65 +967,39 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }

 static void rtmsg_ifa(int event, struct in_ifaddr *ifa)
 {
-	struct sk_buff *skb;
 	int size = NLMSG_SPACE(sizeof(struct ifaddrmsg) + 128);
+	struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);

-	skb = alloc_skb(size, GFP_KERNEL);
-	if (!skb) {
+	if (!skb)
 		netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, ENOBUFS);
-		return;
-	}
-	if (inet_fill_ifaddr(skb, ifa, 0, 0, event) < 0) {
+	else if (inet_fill_ifaddr(skb, ifa, 0, 0, event) < 0) {
 		kfree_skb(skb);
 		netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, EINVAL);
-		return;
+	} else {
+		NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_IFADDR;
+		netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV4_IFADDR, GFP_KERNEL);
 	}
-	NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_IFADDR;
-	netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV4_IFADDR, GFP_KERNEL);
 }
-static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX - RTM_BASE + 1] =
-{
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
-	{ inet_rtm_newaddr,	NULL,			},
-	{ inet_rtm_deladdr,	NULL,			},
-	{ NULL,			inet_dump_ifaddr,	},
-	{ NULL,			NULL,			},
-	{ inet_rtm_newroute,	NULL,			},
-	{ inet_rtm_delroute,	NULL,			},
-	{ inet_rtm_getroute,	inet_dump_fib,		},
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
+static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = {
+	 [4] = { doit:   inet_rtm_newaddr,  },
+	 [5] = { doit:   inet_rtm_deladdr,  },
+	 [6] = { dumpit: inet_dump_ifaddr,  },
+	 [8] = { doit:   inet_rtm_newroute, },
+	 [9] = { doit:   inet_rtm_delroute, },
+	[10] = { doit:   inet_rtm_getroute, dumpit: inet_dump_fib, },
 #ifdef CONFIG_IP_MULTIPLE_TABLES
-	{ inet_rtm_newrule,	NULL,			},
-	{ inet_rtm_delrule,	NULL,			},
-	{ NULL,			inet_dump_rules,	},
-	{ NULL,			NULL,			},
-#else
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
-	{ NULL,			NULL,			},
+	[16] = { doit:   inet_rtm_newrule,  },
+	[17] = { doit:   inet_rtm_delrule,  },
+	[18] = { dumpit: inet_dump_rules,   },
 #endif
 };

 #ifdef CONFIG_SYSCTL

-void inet_forward_change()
+void inet_forward_change(void)
 {
 	struct net_device *dev;
 	int on = ipv4_devconf.forwarding;
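The rtnetlink dispatch table above is rewritten with "[index] =" designated array initializers, so only the message types that actually have handlers are listed and every other slot stays zero. A compact stand-alone example of the same construct, using a toy handler table with invented names:

#include <stdio.h>

#define MSG_MAX 19

typedef int (*handler_t)(int arg);

static int handle_newaddr(int arg) { return arg + 1; }
static int handle_deladdr(int arg) { return arg - 1; }

/* Designated array initializers: entries may be sparse and unordered;
 * every slot not mentioned is implicitly NULL. */
static handler_t msg_handlers[MSG_MAX + 1] = {
	[4] = handle_newaddr,
	[5] = handle_deladdr,
};

static int dispatch(int type, int arg)
{
	if (type < 0 || type > MSG_MAX || !msg_handlers[type])
		return -1;		/* no handler registered */
	return msg_handlers[type](arg);
}

int main(void)
{
	printf("%d %d\n", dispatch(4, 10), dispatch(7, 10));
	return 0;
}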
...
@@ -1009,15 +1021,13 @@ void inet_forward_change()
 	rt_cache_flush(0);
 }

 static
 int devinet_sysctl_forward(ctl_table *ctl, int write, struct file *filp,
 			   void *buffer, size_t *lenp)
 {
 	int *valp = ctl->data;
 	int val = *valp;
-	int ret;
-
-	ret = proc_dointvec(ctl, write, filp, buffer, lenp);
+	int ret = proc_dointvec(ctl, write, filp, buffer, lenp);

 	if (write && *valp != val) {
 		if (valp == &ipv4_devconf.forwarding)
...
@@ -1029,8 +1039,7 @@ int devinet_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
 	return ret;
 }

 static struct devinet_sysctl_table
 {
 	struct ctl_table_header *sysctl_header;
 	ctl_table devinet_vars[15];
 	ctl_table devinet_dev[2];
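devinet_sysctl_forward() snapshots the old value, lets the generic integer handler do the parsing, and only then reacts if a write actually changed the setting. A generic user-space sketch of that "wrap and compare" shape, with invented names standing in for the sysctl machinery:

#include <stdio.h>
#include <stdlib.h>

static int forwarding;			/* the tunable being guarded */

static void forwarding_changed(int on)	/* side effect on real changes only */
{
	printf("forwarding is now %s\n", on ? "on" : "off");
}

/* Generic "parse an int from text into *valp" step, standing in for the
 * shared handler the wrapper delegates to. */
static int parse_int(const char *buf, int *valp)
{
	*valp = atoi(buf);
	return 0;
}

static int set_forwarding(const char *buf, int write)
{
	int val = forwarding;			/* snapshot before parsing */
	int ret = parse_int(buf, &forwarding);	/* delegate the boring part */

	if (write && forwarding != val)		/* react only to real changes */
		forwarding_changed(forwarding);
	return ret;
}

int main(void)
{
	set_forwarding("1", 1);
	set_forwarding("1", 1);	/* no change, no side effect */
	return 0;
}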
...
@@ -1038,69 +1047,168 @@ static struct devinet_sysctl_table
 	ctl_table devinet_proto_dir[2];
 	ctl_table devinet_root_dir[2];
 } devinet_sysctl = {
-	NULL,
-	{{NET_IPV4_CONF_FORWARDING, "forwarding",
-	  &ipv4_devconf.forwarding, sizeof(int), 0644, NULL,
-	  &devinet_sysctl_forward},
-	{NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding",
-	 &ipv4_devconf.mc_forwarding, sizeof(int), 0444, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects",
-	 &ipv4_devconf.accept_redirects, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects",
-	 &ipv4_devconf.secure_redirects, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_SHARED_MEDIA, "shared_media",
-	 &ipv4_devconf.shared_media, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_RP_FILTER, "rp_filter",
-	 &ipv4_devconf.rp_filter, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects",
-	 &ipv4_devconf.send_redirects, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route",
-	 &ipv4_devconf.accept_source_route, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_PROXY_ARP, "proxy_arp",
-	 &ipv4_devconf.proxy_arp, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_MEDIUM_ID, "medium_id",
-	 &ipv4_devconf.medium_id, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay",
-	 &ipv4_devconf.bootp_relay, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_LOG_MARTIANS, "log_martians",
-	 &ipv4_devconf.log_martians, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_TAG, "tag",
-	 &ipv4_devconf.tag, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{NET_IPV4_CONF_ARPFILTER, "arp_filter",
-	 &ipv4_devconf.arp_filter, sizeof(int), 0644, NULL,
-	 &proc_dointvec},
-	{0}},
-	{{NET_PROTO_CONF_ALL, "all", NULL, 0, 0555, devinet_sysctl.devinet_vars}, {0}},
-	{{NET_IPV4_CONF, "conf", NULL, 0, 0555, devinet_sysctl.devinet_dev}, {0}},
-	{{NET_IPV4, "ipv4", NULL, 0, 0555, devinet_sysctl.devinet_conf_dir}, {0}},
-	{{CTL_NET, "net", NULL, 0, 0555, devinet_sysctl.devinet_proto_dir}, {0}}
+	devinet_vars: {
+		{ ctl_name: NET_IPV4_CONF_FORWARDING, procname: "forwarding",
+		  data: &ipv4_devconf.forwarding, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &devinet_sysctl_forward, },
+		{ ctl_name: NET_IPV4_CONF_MC_FORWARDING, procname: "mc_forwarding",
+		  data: &ipv4_devconf.mc_forwarding, maxlen: sizeof(int),
+		  mode: 0444, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_ACCEPT_REDIRECTS, procname: "accept_redirects",
+		  data: &ipv4_devconf.accept_redirects, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_SECURE_REDIRECTS, procname: "secure_redirects",
+		  data: &ipv4_devconf.secure_redirects, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_SHARED_MEDIA, procname: "shared_media",
+		  data: &ipv4_devconf.shared_media, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_RP_FILTER, procname: "rp_filter",
+		  data: &ipv4_devconf.rp_filter, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_SEND_REDIRECTS, procname: "send_redirects",
+		  data: &ipv4_devconf.send_redirects, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, procname: "accept_source_route",
+		  data: &ipv4_devconf.accept_source_route, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_PROXY_ARP, procname: "proxy_arp",
+		  data: &ipv4_devconf.proxy_arp, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_MEDIUM_ID, procname: "medium_id",
+		  data: &ipv4_devconf.medium_id, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_BOOTP_RELAY, procname: "bootp_relay",
+		  data: &ipv4_devconf.bootp_relay, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_LOG_MARTIANS, procname: "log_martians",
+		  data: &ipv4_devconf.log_martians, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_TAG, procname: "tag",
+		  data: &ipv4_devconf.tag, maxlen: sizeof(int),
+		  mode: 0644, proc_handler: &proc_dointvec, },
+		{ ctl_name: NET_IPV4_CONF_ARPFILTER
,
procname:
"arp_filter"
,
data:
&
ipv4_devconf
.
arp_filter
,
maxlen:
sizeof
(
int
),
mode:
0644
,
proc_handler:
&
proc_dointvec
,
},
},
devinet_dev:
{
{
ctl_name:
NET_PROTO_CONF_ALL
,
procname:
"all"
,
mode:
0555
,
child:
devinet_sysctl
.
devinet_vars
,
},
},
devinet_conf_dir:
{
{
ctl_name:
NET_IPV4_CONF
,
procname:
"conf"
,
mode:
0555
,
child:
devinet_sysctl
.
devinet_dev
,
},
},
devinet_proto_dir:
{
{
ctl_name:
NET_IPV4
,
procname:
"ipv4"
,
mode:
0555
,
child:
devinet_sysctl
.
devinet_conf_dir
,
},
},
devinet_root_dir:
{
{
ctl_name:
CTL_NET
,
procname:
"net"
,
mode:
0555
,
child:
devinet_sysctl
.
devinet_proto_dir
,
},
},
};
};
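The initializer rewrite above uses GCC's labeled-element (designated) initializer extension: struct members and array slots are named explicitly instead of being filled positionally, and anything left unnamed is zero. A minimal, self-contained sketch of that idiom, with made-up types rather than the kernel's ctl_table (note that C99 later standardized the spelling as ".field =" and "[index] =", while this diff uses the older GNU "field:" form):

/* Hypothetical example of the field:/[index]= initializer style seen above. */
struct handler {
        const char *name;
        int mode;
        int (*fn)(int);
};

static int noop(int x) { return x; }

/* Members are named explicitly; anything not mentioned is zero/NULL. */
static struct handler table[4] = {
        [0] = { name: "first", mode: 0644, fn: noop },
        [3] = { name: "last",  mode: 0444 },          /* fn stays NULL */
};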
 static void devinet_sysctl_register(struct in_device *in_dev,
 				    struct ipv4_devconf *p)
 {
 	int i;
 	struct net_device *dev = in_dev ? in_dev->dev : NULL;
-	struct devinet_sysctl_table *t;
-
-	t = kmalloc(sizeof(*t), GFP_KERNEL);
-	if (t == NULL)
+	struct devinet_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+
+	if (!t)
 		return;
 	memcpy(t, &devinet_sysctl, sizeof(*t));
-	for (i = 0; i < sizeof(t->devinet_vars) / sizeof(t->devinet_vars[0]) - 1; i++) {
-		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
+	for (i = 0;
+	     i < sizeof(t->devinet_vars) / sizeof(t->devinet_vars[0]) - 1;
+	     i++) {
+		t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
 		t->devinet_vars[i].de = NULL;
 	}
 	if (dev) {
...
@@ -1120,7 +1228,7 @@ static void devinet_sysctl_register(struct in_device *in_dev, struct ipv4_devcon
 	t->devinet_root_dir[0].de = NULL;

 	t->sysctl_header = register_sysctl_table(t->devinet_root_dir, 0);
-	if (t->sysctl_header == NULL)
+	if (!t->sysctl_header)
 		kfree(t);
 	else
 		p->sysctl = t;
...
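devinet_sysctl_register copies the template table and then rebases each data pointer from the global ipv4_devconf onto the per-device configuration by adding the byte offset (char *)p - (char *)&ipv4_devconf. A hedged, stand-alone sketch of that pointer-rebasing trick, with stand-in structures instead of the kernel's ctl_table:

/* Illustration only: rebase template pointers onto another conf instance. */
#include <stddef.h>
#include <string.h>

struct conf { int forwarding; int rp_filter; };
struct entry { const char *name; void *data; };

static struct conf conf_template;
static struct entry entries_template[] = {
        { "forwarding", &conf_template.forwarding },
        { "rp_filter",  &conf_template.rp_filter  },
};

/* Fill 'copy' from the template, but point it at the fields of *p. */
static void rebase(struct entry *copy, struct conf *p)
{
        size_t i;

        memcpy(copy, entries_template, sizeof(entries_template));
        for (i = 0; i < sizeof(entries_template) / sizeof(entries_template[0]); i++)
                copy[i].data = (char *)copy[i].data +
                               ((char *)p - (char *)&conf_template);
}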
net/ipv4/icmp.c  View file @ 868f24fc
...
@@ -52,9 +52,10 @@
 *		the rates sysctl configurable.
 *	Yu Tianli	:	Fixed two ugly bugs in icmp_send
 *				- IP option length was accounted wrongly
-*				- ICMP header length was not accounted at all.
-*	Tristan Greaves :	Added sysctl option to ignore bogus broadcast
-*				responses from broken routers.
+*				- ICMP header length was not accounted
+*				  at all.
+*	Tristan Greaves :	Added sysctl option to ignore bogus
+*				broadcast responses from broken routers.
 *
 * To Fix:
 *
...
@@ -95,8 +96,7 @@
 * Build xmit assembly blocks
 */
-struct icmp_bxm
-{
+struct icmp_bxm {
 	struct sk_buff *skb;
 	int offset;
 	int data_len;
...
@@ -114,29 +114,76 @@ struct icmp_bxm
 /*
  *	Statistics
  */
 struct icmp_mib icmp_statistics[NR_CPUS * 2];

 /* An array of errno for error messages from dest unreach. */
 /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOS_UNREACH and SR_FAIELD MUST be considered 'transient errs'. */
 struct icmp_err icmp_err_convert[] = {
-	{ ENETUNREACH,	0 },	/* ICMP_NET_UNREACH	*/
-	{ EHOSTUNREACH,	0 },	/* ICMP_HOST_UNREACH	*/
-	{ ENOPROTOOPT,	1 },	/* ICMP_PROT_UNREACH	*/
-	{ ECONNREFUSED,	1 },	/* ICMP_PORT_UNREACH	*/
-	{ EMSGSIZE,	0 },	/* ICMP_FRAG_NEEDED	*/
-	{ EOPNOTSUPP,	0 },	/* ICMP_SR_FAILED	*/
-	{ ENETUNREACH,	1 },	/* ICMP_NET_UNKNOWN	*/
-	{ EHOSTDOWN,	1 },	/* ICMP_HOST_UNKNOWN	*/
-	{ ENONET,	1 },	/* ICMP_HOST_ISOLATED	*/
-	{ ENETUNREACH,	1 },	/* ICMP_NET_ANO		*/
-	{ EHOSTUNREACH,	1 },	/* ICMP_HOST_ANO	*/
-	{ ENETUNREACH,	0 },	/* ICMP_NET_UNR_TOS	*/
-	{ EHOSTUNREACH,	0 },	/* ICMP_HOST_UNR_TOS	*/
-	{ EHOSTUNREACH,	1 },	/* ICMP_PKT_FILTERED	*/
-	{ EHOSTUNREACH,	1 },	/* ICMP_PREC_VIOLATION	*/
-	{ EHOSTUNREACH,	1 }	/* ICMP_PREC_CUTOFF	*/
+	{ errno: ENETUNREACH,	fatal: 0, },	/* ICMP_NET_UNREACH	*/
+	{ errno: EHOSTUNREACH,	fatal: 0, },	/* ICMP_HOST_UNREACH	*/
+	{ errno: ENOPROTOOPT,	fatal: 1, },	/* ICMP_PROT_UNREACH	*/
+	{ errno: ECONNREFUSED,	fatal: 1, },	/* ICMP_PORT_UNREACH	*/
+	{ errno: EMSGSIZE,	fatal: 0, },	/* ICMP_FRAG_NEEDED	*/
+	{ errno: EOPNOTSUPP,	fatal: 0, },	/* ICMP_SR_FAILED	*/
+	{ errno: ENETUNREACH,	fatal: 1, },	/* ICMP_NET_UNKNOWN	*/
+	{ errno: EHOSTDOWN,	fatal: 1, },	/* ICMP_HOST_UNKNOWN	*/
+	{ errno: ENONET,	fatal: 1, },	/* ICMP_HOST_ISOLATED	*/
+	{ errno: ENETUNREACH,	fatal: 1, },	/* ICMP_NET_ANO		*/
+	{ errno: EHOSTUNREACH,	fatal: 1, },	/* ICMP_HOST_ANO	*/
+	{ errno: ENETUNREACH,	fatal: 0, },	/* ICMP_NET_UNR_TOS	*/
+	{ errno: EHOSTUNREACH,	fatal: 0, },	/* ICMP_HOST_UNR_TOS	*/
+	{ errno: EHOSTUNREACH,	fatal: 1, },	/* ICMP_PKT_FILTERED	*/
+	{ errno: EHOSTUNREACH,	fatal: 1, },	/* ICMP_PREC_VIOLATION	*/
+	{ errno: EHOSTUNREACH,	fatal: 1, },	/* ICMP_PREC_CUTOFF	*/
 };

 extern int sysctl_ip_default_ttl;
...
@@ -160,7 +207,7 @@ int sysctl_icmp_ignore_bogus_error_responses;
 *	time exceeded (11), parameter problem (12)
 */
 int sysctl_icmp_ratelimit = 1 * HZ;
 int sysctl_icmp_ratemask = 0x1818;
 /*
...
@@ -182,7 +229,6 @@ static struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 */
 struct socket *icmp_socket;

 /* ICMPv4 socket is only a bit non-reenterable (unlike ICMPv6,
...
@@ -194,13 +240,17 @@ static int icmp_xmit_holder = -1;
 static int icmp_xmit_lock_bh(void)
 {
+	int rc;
 	if (!spin_trylock(&icmp_socket->sk->lock.slock)) {
+		rc = -EAGAIN;
 		if (icmp_xmit_holder == smp_processor_id())
-			return -EAGAIN;
+			goto out;
 		spin_lock(&icmp_socket->sk->lock.slock);
 	}
+	rc = 0;
 	icmp_xmit_holder = smp_processor_id();
-	return 0;
+out:	return rc;
 }

 static __inline__ int icmp_xmit_lock(void)
...
@@ -251,68 +301,75 @@ static __inline__ void icmp_xmit_unlock(void)
 int xrlim_allow(struct dst_entry *dst, int timeout)
 {
 	unsigned long now;
+	int rc = 0;

 	now = jiffies;
 	dst->rate_tokens += now - dst->rate_last;
 	dst->rate_last = now;
 	if (dst->rate_tokens > XRLIM_BURST_FACTOR * timeout)
 		dst->rate_tokens = XRLIM_BURST_FACTOR * timeout;
 	if (dst->rate_tokens >= timeout) {
 		dst->rate_tokens -= timeout;
-		return 1;
+		rc = 1;
 	}
-	return 0;
+	return rc;
 }

 static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
 {
 	struct dst_entry *dst = &rt->u.dst;
+	int rc = 1;

 	if (type > NR_ICMP_TYPES)
-		return 1;
+		goto out;

 	/* Don't limit PMTU discovery. */
 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
-		return 1;
+		goto out;

 	/* No rate limit on loopback */
 	if (dst->dev && (dst->dev->flags & IFF_LOOPBACK))
-		return 1;
+		goto out;

 	/* Limit if icmp type is enabled in ratemask. */
 	if ((1 << type) & sysctl_icmp_ratemask)
-		return xrlim_allow(dst, sysctl_icmp_ratelimit);
-	else
-		return 1;
+		rc = xrlim_allow(dst, sysctl_icmp_ratelimit);
+out:
+	return rc;
 }
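xrlim_allow above is a small token-bucket limiter: tokens accumulate with elapsed jiffies, are capped at XRLIM_BURST_FACTOR * timeout, and each permitted event spends one timeout's worth of tokens. The same scheme in a self-contained sketch, with a caller-supplied clock value standing in for jiffies and a locally defined burst factor (assumptions, not values taken from the kernel):

/* Token-bucket limiter in the style of xrlim_allow(); illustration only. */
struct bucket {
        unsigned long rate_tokens;   /* accumulated time credit */
        unsigned long rate_last;     /* last clock sample */
};

#define BURST_FACTOR 6               /* assumed burst cap for this sketch */

/* Return 1 if an event may be emitted now, 0 if it should be suppressed. */
static int allow(struct bucket *b, unsigned long now, unsigned long timeout)
{
        int ok = 0;

        b->rate_tokens += now - b->rate_last;
        b->rate_last = now;
        if (b->rate_tokens > BURST_FACTOR * timeout)
                b->rate_tokens = BURST_FACTOR * timeout;
        if (b->rate_tokens >= timeout) {
                b->rate_tokens -= timeout;
                ok = 1;
        }
        return ok;
}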
 /*
  *	Maintain the counters used in the SNMP statistics for outgoing ICMP
  */
 static void icmp_out_count(int type)
 {
-	if (type > NR_ICMP_TYPES)
-		return;
-	(icmp_pointers[type].output)[(smp_processor_id()*2+!in_softirq())*sizeof(struct icmp_mib)/sizeof(unsigned long)]++;
-	ICMP_INC_STATS(IcmpOutMsgs);
+	if (type <= NR_ICMP_TYPES) {
+		(icmp_pointers[type].output)[(smp_processor_id() * 2 +
+					      !in_softirq()) *
+					     sizeof(struct icmp_mib) /
+					     sizeof(unsigned long)]++;
+		ICMP_INC_STATS(IcmpOutMsgs);
+	}
 }

 /*
- *	Checksum each fragment, and on the first include the headers and final checksum.
+ *	Checksum each fragment, and on the first include the headers and final
+ *	checksum.
  */
 static int icmp_glue_bits(const void *p, char *to, unsigned int offset,
 			  unsigned int fraglen)
 {
 	struct icmp_bxm *icmp_param = (struct icmp_bxm *)p;
 	struct icmphdr *icmph;
 	unsigned int csum;

 	if (offset) {
-		icmp_param->csum = skb_copy_and_csum_bits(icmp_param->skb,
-			icmp_param->offset + (offset - icmp_param->head_len),
-			to, fraglen, icmp_param->csum);
-		return 0;
+		icmp_param->csum =
+			skb_copy_and_csum_bits(icmp_param->skb,
+					       icmp_param->offset +
+					       (offset - icmp_param->head_len),
+					       to, fraglen, icmp_param->csum);
+		goto out;
 	}

 	/*
...
@@ -323,13 +380,12 @@ static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned
 	csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
 					 to, icmp_param->head_len,
 					 icmp_param->csum);
 	csum = skb_copy_and_csum_bits(icmp_param->skb,
 				      icmp_param->offset,
 				      to + icmp_param->head_len,
 				      fraglen - icmp_param->head_len, csum);
 	icmph = (struct icmphdr *)to;
 	icmph->checksum = csum_fold(csum);
+out:
 	return 0;
 }

...
@@ -339,20 +395,18 @@ static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned
 static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 {
 	struct sock *sk = icmp_socket->sk;
 	struct inet_opt *inet = inet_sk(sk);
 	struct ipcm_cookie ipc;
 	struct rtable *rt = (struct rtable *)skb->dst;
 	u32 daddr;

-	if (ip_options_echo(&icmp_param->replyopts, skb))
-		return;
-
-	if (icmp_xmit_lock_bh())
-		return;
+	if (ip_options_echo(&icmp_param->replyopts, skb) ||
+	    icmp_xmit_lock_bh())
+		goto out;

 	icmp_param->data.icmph.checksum = 0;
 	icmp_param->csum = 0;
 	icmp_out_count(icmp_param->data.icmph.type);
 	inet->tos = skb->nh.iph->tos;
...
@@ -364,8 +418,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 		if (ipc.opt->srr)
 			daddr = icmp_param->replyopts.faddr;
 	}
-	if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0))
-		goto out;
+	if (ip_route_output(&rt, daddr, rt->rt_spec_dst,
+			    RT_TOS(skb->nh.iph->tos), 0))
+		goto out_unlock;
 	if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type,
 			       icmp_param->data.icmph.code)) {
 		ip_build_xmit(sk, icmp_glue_bits, icmp_param,
...
@@ -373,15 +428,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 			      &ipc, rt, MSG_DONTWAIT);
 	}
 	ip_rt_put(rt);
-out:
+out_unlock:
 	icmp_xmit_unlock_bh();
+out:;
 }
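icmp_glue_bits accumulates a 32-bit partial checksum over the ICMP header and each copied fragment, then folds it to the 16-bit ones'-complement value stored in icmph->checksum. A simplified, unoptimized sketch of that accumulate-and-fold step follows; it is an illustration of the arithmetic only, not the kernel's csum_partial/csum_fold implementation, and it sums big-endian 16-bit words with any odd trailing byte ignored:

#include <stdint.h>
#include <stddef.h>

/* Sum 16-bit words into a 32-bit accumulator (trailing odd byte ignored). */
static uint32_t csum_accumulate(const uint8_t *data, size_t len, uint32_t sum)
{
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)((data[i] << 8) | data[i + 1]);
        return sum;
}

/* Fold the 32-bit sum to 16 bits and complement it, ICMP-checksum style. */
static uint16_t csum_fold16(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}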
 /*
  *	Send an ICMP message in response to a situation
  *
- *	RFC 1122: 3.2.2	MUST send at least the IP header and 8 bytes of header. MAY send more (we do).
+ *	RFC 1122: 3.2.2	MUST send at least the IP header and 8 bytes of header.
+ *		  MAY send more (we do).
  *		  MUST NOT change this header information.
  *		  MUST NOT reply to a multicast/broadcast IP address.
  *		  MUST NOT reply to a multicast/broadcast MAC address.
...
@@ -393,13 +450,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 	struct iphdr *iph;
 	int room;
 	struct icmp_bxm icmp_param;
 	struct rtable *rt = (struct rtable *)skb_in->dst;
 	struct ipcm_cookie ipc;
 	u32 saddr;
 	u8  tos;

 	if (!rt)
-		return;
+		goto out;
...
@@ -408,56 +465,57 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 	 */
 	iph = skb_in->nh.iph;

 	if ((u8 *)iph < skb_in->head || (u8 *)(iph + 1) > skb_in->tail)
-		return;
+		goto out;

 	/*
 	 *	No replies to physical multicast/broadcast
 	 */
 	if (skb_in->pkt_type != PACKET_HOST)
-		return;
+		goto out;

 	/*
 	 *	Now check at the protocol level
 	 */
 	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
-		return;
+		goto out;

 	/*
 	 *	Only reply to fragment 0. We byte re-order the constant
 	 *	mask for efficiency.
 	 */
 	if (iph->frag_off & htons(IP_OFFSET))
-		return;
+		goto out;

 	/*
 	 *	If we send an ICMP error to an ICMP error a mess would result..
 	 */
 	if (icmp_pointers[type].error) {
 		/*
-		 *	We are an error, check if we are replying to an ICMP error
+		 *	We are an error, check if we are replying to an
+		 *	ICMP error
 		 */
 		if (iph->protocol == IPPROTO_ICMP) {
 			u8 inner_type;

 			if (skb_copy_bits(skb_in,
 					  skb_in->nh.raw + (iph->ihl << 2) +
 					  offsetof(struct icmphdr, type) -
 					  skb_in->data, &inner_type, 1))
-				return;
+				goto out;

 			/*
-			 *	Assume any unknown ICMP type is an error. This isn't
-			 *	specified by the RFC, but think about it..
+			 *	Assume any unknown ICMP type is an error. This
+			 *	isn't specified by the RFC, but think about it..
 			 */
-			if (inner_type > NR_ICMP_TYPES || icmp_pointers[inner_type].error)
-				return;
+			if (inner_type > NR_ICMP_TYPES ||
+			    icmp_pointers[inner_type].error)
+				goto out;
 		}
 	}

 	if (icmp_xmit_lock())
-		return;
+		goto out;
...
@@ -467,7 +525,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 	/*
 	 *	Restore original addresses if packet has been translated.
 	 */
 	if (rt->rt_flags & RTCF_NAT && IPCB(skb_in)->flags & IPSKB_TRANSLATED) {
 		iph->daddr = rt->key.dst;
 		iph->saddr = rt->key.src;
 	}
...
@@ -477,12 +535,12 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 	if (!(rt->rt_flags & RTCF_LOCAL))
 		saddr = 0;

 	tos = icmp_pointers[type].error ?
 		((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) :
 		iph->tos;

 	if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0))
-		goto out;
+		goto out_unlock;

 	if (ip_options_echo(&icmp_param.replyopts, skb_in))
 		goto ende;
...
@@ -492,13 +550,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 	 *	Prepare data for ICMP header.
 	 */
 	icmp_param.data.icmph.type	 = type;
 	icmp_param.data.icmph.code	 = code;
 	icmp_param.data.icmph.un.gateway = info;
 	icmp_param.data.icmph.checksum	 = 0;
 	icmp_param.csum			 = 0;
 	icmp_param.skb			 = skb_in;
 	icmp_param.offset		 = skb_in->nh.raw - skb_in->data;
 	icmp_out_count(icmp_param.data.icmph.type);
 	inet_sk(icmp_socket->sk)->tos	 = tos;
 	inet_sk(icmp_socket->sk)->ttl	 = sysctl_ip_default_ttl;
...
@@ -506,8 +564,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 	ipc.opt = &icmp_param.replyopts;
 	if (icmp_param.replyopts.srr) {
 		ip_rt_put(rt);
-		if (ip_route_output(&rt, icmp_param.replyopts.faddr, saddr, RT_TOS(tos), 0))
-			goto out;
+		if (ip_route_output(&rt, icmp_param.replyopts.faddr,
+				    saddr, RT_TOS(tos), 0))
+			goto out_unlock;
 	}

 	if (!icmpv4_xrlim_allow(rt, type, code))
...
@@ -521,19 +580,19 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
 	room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
 	room -= sizeof(struct icmphdr);

 	icmp_param.data_len = skb_in->len - icmp_param.offset;
 	if (icmp_param.data_len > room)
 		icmp_param.data_len = room;
 	icmp_param.head_len = sizeof(struct icmphdr);

 	ip_build_xmit(icmp_socket->sk, icmp_glue_bits, &icmp_param,
 		      icmp_param.data_len + sizeof(struct icmphdr),
 		      &ipc, rt, MSG_DONTWAIT);
 ende:
 	ip_rt_put(rt);
-out:
+out_unlock:
 	icmp_xmit_unlock();
+out:;
 }
...
@@ -556,60 +615,59 @@ static void icmp_unreach(struct sk_buff *skb)
 	 *	additional check for longer headers in upper levels.
 	 */
-	if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
-	}
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		goto out_err;

 	icmph = skb->h.icmph;
 	iph   = (struct iphdr *)skb->data;

-	if (iph->ihl < 5) {
-		/* Mangled header, drop. */
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
-	}
+	if (iph->ihl < 5) /* Mangled header, drop. */
+		goto out_err;

 	if (icmph->type == ICMP_DEST_UNREACH) {
 		switch (icmph->code & 15) {
 		case ICMP_NET_UNREACH:
-			break;
 		case ICMP_HOST_UNREACH:
-			break;
 		case ICMP_PROT_UNREACH:
-			break;
 		case ICMP_PORT_UNREACH:
 			break;
 		case ICMP_FRAG_NEEDED:
 			if (ipv4_config.no_pmtu_disc) {
 				if (net_ratelimit())
-					printk(KERN_INFO "ICMP: %u.%u.%u.%u: fragmentation needed and DF set.\n",
+					printk(KERN_INFO "ICMP: %u.%u.%u.%u: "
+					       "fragmentation needed "
+					       "and DF set.\n",
 					       NIPQUAD(iph->daddr));
 			} else {
 				info = ip_rt_frag_needed(iph, ntohs(icmph->un.frag.mtu));
 				if (!info)
 					goto out;
 			}
 			break;
 		case ICMP_SR_FAILED:
 			if (net_ratelimit())
-				printk(KERN_INFO "ICMP: %u.%u.%u.%u: Source Route Failed.\n", NIPQUAD(iph->daddr));
+				printk(KERN_INFO "ICMP: %u.%u.%u.%u: Source "
+				       "Route Failed.\n",
+				       NIPQUAD(iph->daddr));
 			break;
 		default:
 			break;
 		}
 		if (icmph->code > NR_ICMP_UNREACH)
 			goto out;
-	} else if (icmph->type == ICMP_PARAMETERPROB) {
+	} else if (icmph->type == ICMP_PARAMETERPROB)
 		info = ntohl(icmph->un.gateway) >> 24;
-	}

 	/*
 	 *	Throw it at our lower layers
 	 *
-	 *	RFC 1122: 3.2.2 MUST extract the protocol ID from the passed header.
-	 *	RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the transport layer.
-	 *	RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to transport layer.
+	 *	RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
+	 *	header.
+	 *	RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
+	 *	transport layer.
+	 *	RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
+	 *	transport layer.
 	 */
...
@@ -619,25 +677,22 @@ static void icmp_unreach(struct sk_buff *skb)
 	 *	get the other vendor to fix their kit.
 	 */
-	if (!sysctl_icmp_ignore_bogus_error_responses) {
-		if (inet_addr_type(iph->daddr) == RTN_BROADCAST) {
-			if (net_ratelimit())
-				printk(KERN_WARNING "%u.%u.%u.%u sent an invalid ICMP error to a broadcast.\n",
-				       NIPQUAD(skb->nh.iph->saddr));
-			goto out;
-		}
-	}
+	if (!sysctl_icmp_ignore_bogus_error_responses &&
+	    inet_addr_type(iph->daddr) == RTN_BROADCAST) {
+		if (net_ratelimit())
+			printk(KERN_WARNING "%u.%u.%u.%u sent an invalid ICMP "
+					    "error to a broadcast.\n",
+			       NIPQUAD(skb->nh.iph->saddr));
+		goto out;
+	}

 	/* Checkin full IP header plus 8 bytes of protocol to
 	 * avoid additional coding at protocol handlers.
 	 */
 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
 		goto out;

 	iph = (struct iphdr *)skb->data;
 	protocol = iph->protocol;

 	/*
...
@@ -647,10 +702,10 @@ static void icmp_unreach(struct sk_buff *skb)
 	/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
 	hash = protocol & (MAX_INET_PROTOS - 1);
 	read_lock(&raw_v4_lock);
 	if ((raw_sk = raw_v4_htable[hash]) != NULL) {
 		while ((raw_sk = __raw_v4_lookup(raw_sk, protocol, iph->daddr,
 						 iph->saddr,
 						 skb->dev->ifindex)) != NULL) {
 			raw_err(raw_sk, skb, info);
 			raw_sk = raw_sk->next;
 			iph = (struct iphdr *)skb->data;
...
@@ -664,12 +719,11 @@ static void icmp_unreach(struct sk_buff *skb)
 	 * we are OK.
 	 */
 	ipprot = (struct inet_protocol *)inet_protos[hash];
 	while (ipprot) {
 		struct inet_protocol *nextip;

 		nextip = (struct inet_protocol *)ipprot->next;

 		/*
 		 *	Pass it off to everyone who wants it.
 		 */
...
@@ -682,7 +736,11 @@ static void icmp_unreach(struct sk_buff *skb)
 		ipprot = nextip;
 	}
-out:;
+out:
+	return;
+out_err:
+	ICMP_INC_STATS_BH(IcmpInErrors);
+	goto out;
 }
...
@@ -695,18 +753,16 @@ static void icmp_redirect(struct sk_buff *skb)
 	struct iphdr *iph;
 	unsigned long ip;

-	if (skb->len < sizeof(struct iphdr)) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
-	}
+	if (skb->len < sizeof(struct iphdr))
+		goto out_err;

 	/*
 	 *	Get the copied header of the packet that caused the redirect
 	 */
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
-		return;
+		goto out;

 	iph = (struct iphdr *)skb->data;
 	ip = iph->daddr;

 	switch (skb->h.icmph->code & 7) {
...
@@ -716,22 +772,31 @@ static void icmp_redirect(struct sk_buff *skb)
 	 *	As per RFC recommendations now handle it as
 	 *	a host redirect.
 	 */
 	case ICMP_REDIR_HOST:
 	case ICMP_REDIR_HOSTTOS:
-		ip_rt_redirect(skb->nh.iph->saddr, ip, skb->h.icmph->un.gateway, iph->saddr, iph->tos, skb->dev);
+		ip_rt_redirect(skb->nh.iph->saddr, ip,
+			       skb->h.icmph->un.gateway,
+			       iph->saddr, iph->tos, skb->dev);
 		break;
 	default:
 		break;
 	}
+out:
+	return;
+out_err:
+	ICMP_INC_STATS_BH(IcmpInErrors);
+	goto out;
 }

 /*
  *	Handle ICMP_ECHO ("ping") requests.
  *
- *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo requests.
- *	RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be included in the reply.
- *	RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring echo requests, MUST have default=NOT.
+ *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
+ *		  requests.
+ *	RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
+ *		  included in the reply.
+ *	RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
+ *		  echo requests, MUST have default=NOT.
  *	See also WRT handling of options once they are done and working.
  */
...
@@ -740,12 +805,12 @@ static void icmp_echo(struct sk_buff *skb)
 	if (!sysctl_icmp_echo_ignore_all) {
 		struct icmp_bxm icmp_param;

 		icmp_param.data.icmph	   = *skb->h.icmph;
 		icmp_param.data.icmph.type = ICMP_ECHOREPLY;
 		icmp_param.skb		   = skb;
 		icmp_param.offset	   = 0;
 		icmp_param.data_len	   = skb->len;
 		icmp_param.head_len	   = sizeof(struct icmphdr);
 		icmp_reply(&icmp_param, skb);
 	}
 }
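icmp_echo answers a ping by reusing the incoming ICMP header: it copies the header, rewrites the type to ICMP_ECHOREPLY, and hands the original payload back through icmp_reply. The same idea in a stripped-down, kernel-independent sketch; the structure layout and helper name below are assumptions made for illustration, checksum recomputation is left to the caller, and the output buffer is assumed to hold at least req_len bytes:

#include <stdint.h>
#include <string.h>
#include <stddef.h>

struct icmp_hdr {
        uint8_t  type;      /* 8 = echo request, 0 = echo reply */
        uint8_t  code;
        uint16_t checksum;
        uint16_t id;
        uint16_t sequence;
};

/* Build a reply datagram from a request: same id/seq/payload, type flipped. */
static size_t build_echo_reply(const uint8_t *req, size_t req_len, uint8_t *out)
{
        struct icmp_hdr hdr;

        if (req_len < sizeof(hdr))
                return 0;
        memcpy(&hdr, req, sizeof(hdr));
        hdr.type = 0;               /* ICMP_ECHOREPLY */
        hdr.checksum = 0;           /* caller recomputes over the reply */
        memcpy(out, &hdr, sizeof(hdr));
        memcpy(out + sizeof(hdr), req + sizeof(hdr), req_len - sizeof(hdr));
        return req_len;
}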
...
@@ -757,37 +822,38 @@ static void icmp_echo(struct sk_buff *skb)
 *		MUST be accurate to a few minutes.
 *		MUST be updated at least at 15Hz.
 */
 static void icmp_timestamp(struct sk_buff *skb)
 {
 	struct timeval tv;
 	struct icmp_bxm icmp_param;

 	/*
 	 *	Too short.
 	 */
-	if (skb->len < 4) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
-	}
+	if (skb->len < 4)
+		goto out_err;

 	/*
 	 *	Fill in the current time as ms since midnight UT:
 	 */
 	do_gettimeofday(&tv);
 	icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * 1000 +
 					 tv.tv_usec / 1000);
 	icmp_param.data.times[2] = icmp_param.data.times[1];
 	if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
 		BUG();
 	icmp_param.data.icmph	   = *skb->h.icmph;
 	icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
 	icmp_param.data.icmph.code = 0;
 	icmp_param.skb		   = skb;
 	icmp_param.offset	   = 0;
 	icmp_param.data_len	   = 0;
 	icmp_param.head_len	   = sizeof(struct icmphdr) + 12;
 	icmp_reply(&icmp_param, skb);
+out:
+	return;
+out_err:
+	ICMP_INC_STATS_BH(IcmpInErrors);
+	goto out;
 }
...
@@ -839,35 +905,38 @@ static void icmp_address(struct sk_buff *skb)
 static void icmp_address_reply(struct sk_buff *skb)
 {
 	struct rtable *rt = (struct rtable *)skb->dst;
 	struct net_device *dev = skb->dev;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
 	u32 mask;

 	if (skb->len < 4 || !(rt->rt_flags & RTCF_DIRECTSRC))
-		return;
+		goto out;

 	in_dev = in_dev_get(dev);
 	if (!in_dev)
-		return;
+		goto out;

 	read_lock(&in_dev->lock);
 	if (in_dev->ifa_list &&
 	    IN_DEV_LOG_MARTIANS(in_dev) &&
 	    IN_DEV_FORWARD(in_dev)) {
 		if (skb_copy_bits(skb, 0, &mask, 4))
 			BUG();
 		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
 			if (mask == ifa->ifa_mask &&
 			    inet_ifa_match(rt->rt_src, ifa))
 				break;
 		}
 		if (!ifa && net_ratelimit()) {
-			printk(KERN_INFO "Wrong address mask %u.%u.%u.%u from %s/%u.%u.%u.%u\n",
+			printk(KERN_INFO "Wrong address mask %u.%u.%u.%u from "
+					 "%s/%u.%u.%u.%u\n",
 			       NIPQUAD(mask), dev->name, NIPQUAD(rt->rt_src));
 		}
 	}
 	read_unlock(&in_dev->lock);
 	in_dev_put(in_dev);
+out:;
 }
 static void icmp_discard(struct sk_buff *skb)
...
@@ -877,19 +946,19 @@ static void icmp_discard(struct sk_buff *skb)
 /*
  *	Deal with incoming ICMP packets.
  */
 int icmp_rcv(struct sk_buff *skb)
 {
 	struct icmphdr *icmph;
 	struct rtable *rt = (struct rtable *)skb->dst;

 	ICMP_INC_STATS_BH(IcmpInMsgs);

 	switch (skb->ip_summed) {
 	case CHECKSUM_HW:
-		if ((u16)csum_fold(skb->csum) == 0)
+		if (!(u16)csum_fold(skb->csum))
 			break;
 		NETDEBUG(if (net_ratelimit())
 				printk(KERN_DEBUG "icmp v4 hw csum failure\n"));
 	case CHECKSUM_NONE:
 		if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))
 			goto error;
...
@@ -904,7 +973,8 @@ int icmp_rcv(struct sk_buff *skb)
 	/*
 	 *	18 is the highest 'known' ICMP type. Anything else is a mystery
 	 *
-	 *	RFC 1122: 3.2.2  Unknown ICMP messages types MUST be silently discarded.
+	 *	RFC 1122: 3.2.2  Unknown ICMP messages types MUST be silently
+	 *		  discarded.
 	 */
 	if (icmph->type > NR_ICMP_TYPES)
 		goto error;
...
@@ -914,7 +984,7 @@ int icmp_rcv(struct sk_buff *skb)
 	/*
 	 *	Parse the ICMP message
 	 */
 	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
 		/*
 		 *	RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
 		 *	  silently ignored (we let user decide with a sysctl).
...
@@ -933,7 +1003,9 @@ int icmp_rcv(struct sk_buff *skb)
 		}
 	}

-	icmp_pointers[icmph->type].input[smp_processor_id()*2*sizeof(struct icmp_mib)/sizeof(unsigned long)]++;
+	icmp_pointers[icmph->type].input[smp_processor_id() * 2 *
+					 sizeof(struct icmp_mib) /
+					 sizeof(unsigned long)]++;
 	(icmp_pointers[icmph->type].handler)(skb);

 drop:
...
@@ -947,40 +1019,127 @@ int icmp_rcv(struct sk_buff *skb)
 /*
  *	This table is the definition of how we handle ICMP.
  */
 static struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
 	/* ECHO REPLY (0) */
-	{ &icmp_statistics[0].IcmpOutEchoReps, &icmp_statistics[0].IcmpInEchoReps, icmp_discard, 0 },
-	{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 },
-	{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 },
-	/* DEST UNREACH (3) */
-	{ &icmp_statistics[0].IcmpOutDestUnreachs, &icmp_statistics[0].IcmpInDestUnreachs, icmp_unreach, 1 },
-	/* SOURCE QUENCH (4) */
-	{ &icmp_statistics[0].IcmpOutSrcQuenchs, &icmp_statistics[0].IcmpInSrcQuenchs, icmp_unreach, 1 },
-	/* REDIRECT (5) */
-	{ &icmp_statistics[0].IcmpOutRedirects, &icmp_statistics[0].IcmpInRedirects, icmp_redirect, 1 },
-	{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 },
-	{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 },
-	/* ECHO (8) */
-	{ &icmp_statistics[0].IcmpOutEchos, &icmp_statistics[0].IcmpInEchos, icmp_echo, 0 },
-	{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 },
-	{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 },
-	/* TIME EXCEEDED (11) */
-	{ &icmp_statistics[0].IcmpOutTimeExcds, &icmp_statistics[0].IcmpInTimeExcds, icmp_unreach, 1 },
-	/* PARAMETER PROBLEM (12) */
-	{ &icmp_statistics[0].IcmpOutParmProbs, &icmp_statistics[0].IcmpInParmProbs, icmp_unreach, 1 },
-	/* TIMESTAMP (13) */
-	{ &icmp_statistics[0].IcmpOutTimestamps, &icmp_statistics[0].IcmpInTimestamps, icmp_timestamp, 0 },
-	/* TIMESTAMP REPLY (14) */
-	{ &icmp_statistics[0].IcmpOutTimestampReps, &icmp_statistics[0].IcmpInTimestampReps, icmp_discard, 0 },
-	/* INFO (15) */
-	{ &icmp_statistics[0].dummy, &icmp_statistics[0].dummy, icmp_discard, 0 },
-	/* INFO REPLY (16) */
-	{ &icmp_statistics[0].dummy, &icmp_statistics[0].dummy, icmp_discard, 0 },
-	/* ADDR MASK (17) */
-	{ &icmp_statistics[0].IcmpOutAddrMasks, &icmp_statistics[0].IcmpInAddrMasks, icmp_address, 0 },
-	/* ADDR MASK REPLY (18) */
-	{ &icmp_statistics[0].IcmpOutAddrMaskReps, &icmp_statistics[0].IcmpInAddrMaskReps, icmp_address_reply, 0 }
+	[0] = { output: &icmp_statistics[0].IcmpOutEchoReps, input: &icmp_statistics[0].IcmpInEchoReps, handler: icmp_discard, },
+	[1] = { output: &icmp_statistics[0].dummy, input: &icmp_statistics[0].IcmpInErrors, handler: icmp_discard, error: 1, },
+	[2] = { output: &icmp_statistics[0].dummy, input: &icmp_statistics[0].IcmpInErrors, handler: icmp_discard, error: 1, },
+	/* DEST UNREACH (3) */
+	[3] = { output: &icmp_statistics[0].IcmpOutDestUnreachs, input: &icmp_statistics[0].IcmpInDestUnreachs, handler: icmp_unreach, error: 1, },
+	/* SOURCE QUENCH (4) */
+	[4] = { output: &icmp_statistics[0].IcmpOutSrcQuenchs, input: &icmp_statistics[0].IcmpInSrcQuenchs, handler: icmp_unreach, error: 1, },
+	/* REDIRECT (5) */
+	[5] = { output: &icmp_statistics[0].IcmpOutRedirects, input: &icmp_statistics[0].IcmpInRedirects, handler: icmp_redirect, error: 1, },
+	[6] = { output: &icmp_statistics[0].dummy, input: &icmp_statistics[0].IcmpInErrors, handler: icmp_discard, error: 1, },
+	[7] = { output: &icmp_statistics[0].dummy, input: &icmp_statistics[0].IcmpInErrors, handler: icmp_discard, error: 1, },
+	/* ECHO (8) */
+	[8] = { output: &icmp_statistics[0].IcmpOutEchos, input: &icmp_statistics[0].IcmpInEchos, handler: icmp_echo, error: 0, },
+	[9] = { output: &icmp_statistics[0].dummy, input: &icmp_statistics[0].IcmpInErrors, handler: icmp_discard, error: 1, },
+	[10] = { output: &icmp_statistics[0].dummy, input: &icmp_statistics[0].IcmpInErrors, handler: icmp_discard, error: 1, },
+	/* TIME EXCEEDED (11) */
+	[11] = { output: &icmp_statistics[0].IcmpOutTimeExcds, input: &icmp_statistics[0].IcmpInTimeExcds, handler: icmp_unreach, error: 1, },
+	/* PARAMETER PROBLEM (12) */
+	[12] = { output: &icmp_statistics[0].IcmpOutParmProbs, input: &icmp_statistics[0].IcmpInParmProbs, handler: icmp_unreach, error: 1, },
+	/* TIMESTAMP (13) */
+	[13] = { output: &icmp_statistics[0].IcmpOutTimestamps, input: &icmp_statistics[0].IcmpInTimestamps, handler: icmp_timestamp, },
+	/* TIMESTAMP REPLY (14) */
+	[14] = { output: &icmp_statistics[0].IcmpOutTimestampReps, input: &icmp_statistics[0].IcmpInTimestampReps, handler: icmp_discard, },
+	/* INFO (15) */
+	[15] = { output: &icmp_statistics[0].dummy, input: &icmp_statistics[0].dummy, handler: icmp_discard, },
+	/* INFO REPLY (16) */
+	[16] = { output: &icmp_statistics[0].dummy, input: &icmp_statistics[0].dummy, handler: icmp_discard, },
+	/* ADDR MASK (17) */
+	[17] = { output: &icmp_statistics[0].IcmpOutAddrMasks, input: &icmp_statistics[0].IcmpInAddrMasks, handler: icmp_address, },
+	/* ADDR MASK REPLY (18) */
+	[18] = { output: &icmp_statistics[0].IcmpOutAddrMaskReps, input: &icmp_statistics[0].IcmpInAddrMaskReps, handler: icmp_address_reply, }
 };
 void __init icmp_init(struct net_proto_family *ops)
...
@@ -990,8 +1149,8 @@ void __init icmp_init(struct net_proto_family *ops)
 	if (err < 0)
 		panic("Failed to create the ICMP control socket.\n");
 	icmp_socket->sk->allocation = GFP_ATOMIC;
 	icmp_socket->sk->sndbuf = SK_WMEM_MAX * 2;
 	inet = inet_sk(icmp_socket->sk);
 	inet->ttl = MAXTTL;
 	inet->pmtudisc = IP_PMTUDISC_DONT;
...
net/ipv4/tcp_ipv4.c  View file @ 868f24fc
...
@@ -32,7 +32,8 @@
 *		and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
-*				the TCP layer, missed a check for an ACK bit.
+*				the TCP layer, missed a check for an
+*				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *	     			Fixed many serious bugs in the
 *				open_request handling and moved
...
@@ -42,7 +43,8 @@
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *		Andi Kleen:	various fixes.
-*	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
+*	Vitaly E. Lavrov	:	Transparent proxy revived after year
+*					coma.
 *	Andi Kleen	: 	Fix new listen.
 *	Andi Kleen	:	Fix accept error reporting.
 */
...
@@ -65,7 +67,7 @@
 extern int sysctl_ip_dynaddr;
 extern int sysctl_ip_default_ttl;
-int sysctl_tcp_tw_reuse = 0;
+int sysctl_tcp_tw_reuse;

 /* Check TCP sequence numbers in ICMP packets. */
 #define ICMP_MIN_LENGTH 8
...
@@ -76,15 +78,7 @@ static struct socket *tcp_socket;
 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
 		       struct sk_buff *skb);

-/*
- * ALL members must be initialised to prevent gcc-2.7.2.3 miscompilation
- */
 struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
-	__tcp_ehash:          NULL,
-	__tcp_bhash:          NULL,
-	__tcp_bhash_size:     0,
-	__tcp_ehash_size:     0,
-	__tcp_listening_hash: { NULL, },
 	__tcp_lhash_lock:     RW_LOCK_UNLOCKED,
 	__tcp_lhash_users:    ATOMIC_INIT(0),
 	__tcp_lhash_wait:
...
@@ -98,14 +92,14 @@ struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
 * 32768-61000
 */
 int sysctl_local_port_range[2] = { 1024, 4999 };
-int tcp_port_rover = (1024 - 1);
+int tcp_port_rover = 1024 - 1;

 static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
 				 __u32 faddr, __u16 fport)
 {
-	int h = ((laddr ^ lport) ^ (faddr ^ fport));
+	int h = (laddr ^ lport) ^ (faddr ^ fport);
 	h ^= h >> 16;
 	h ^= h >> 8;
 	return h & (tcp_ehash_size - 1);
 }
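tcp_hashfn mixes the local and foreign address/port pairs with XOR and then folds the high bits down before masking with the (power-of-two) table size, so every input bit can influence the low bits used as the bucket index. The same folding hash written out independently of the kernel types, as a hedged sketch:

#include <stdint.h>

/* XOR-fold hash of a connection 4-tuple; table_size must be a power of two. */
static unsigned int conn_hash(uint32_t laddr, uint16_t lport,
                              uint32_t faddr, uint16_t fport,
                              unsigned int table_size)
{
        uint32_t h = (laddr ^ lport) ^ (faddr ^ fport);

        h ^= h >> 16;      /* fold the upper half into the lower half */
        h ^= h >> 8;       /* fold again so the low byte sees every bit */
        return h & (table_size - 1);
}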
...
@@ -126,14 +120,13 @@ static __inline__ int tcp_sk_hashfn(struct sock *sk)
 struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
 					  unsigned short snum)
 {
-	struct tcp_bind_bucket *tb;
-
-	tb = kmem_cache_alloc(tcp_bucket_cachep, SLAB_ATOMIC);
-	if (tb != NULL) {
+	struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
+						      SLAB_ATOMIC);
+	if (tb) {
 		tb->port = snum;
 		tb->fastreuse = 0;
 		tb->owners = NULL;
 		if ((tb->next = head->chain) != NULL)
 			tb->next->pprev = &tb->next;
 		head->chain = tb;
 		tb->pprev = &head->chain;
...
@@ -154,7 +147,7 @@ static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
 		tb->owners->bind_pprev = &child->bind_next;
 	tb->owners = child;
 	child->bind_pprev = &tb->owners;
 	child->prev = (struct sock *)tb;
 	spin_unlock(&head->lock);
 }
...
@@ -165,14 +158,15 @@ __inline__ void tcp_inherit_port(struct sock *sk, struct sock *child)
 	local_bh_enable();
 }

-static inline void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb, unsigned short snum)
+static inline void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
+				 unsigned short snum)
 {
 	inet_sk(sk)->num = snum;
 	if ((sk->bind_next = tb->owners) != NULL)
 		tb->owners->bind_pprev = &sk->bind_next;
 	tb->owners = sk;
 	sk->bind_pprev = &tb->owners;
 	sk->prev = (struct sock *)tb;
 }
 static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
...
@@ -181,16 +175,13 @@ static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
 	struct sock *sk2 = tb->owners;
 	int sk_reuse = sk->reuse;

-	for ( ; sk2 != NULL; sk2 = sk2->bind_next) {
+	for ( ; sk2; sk2 = sk2->bind_next) {
 		if (sk != sk2 &&
 		    sk->bound_dev_if == sk2->bound_dev_if) {
 			if (!sk_reuse || !sk2->reuse ||
 			    sk2->state == TCP_LISTEN) {
 				struct inet_opt *inet2 = inet_sk(sk2);

 				if (!inet2->rcv_saddr ||
 				    !inet->rcv_saddr ||
-				    inet2->rcv_saddr == inet->rcv_saddr)
+				    (inet2->rcv_saddr == inet->rcv_saddr))
 					break;
 			}
 		}
 	}
...
@@ -208,7 +199,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 	int ret;

 	local_bh_disable();
-	if (snum == 0) {
+	if (!snum) {
 		int low = sysctl_local_port_range[0];
 		int high = sysctl_local_port_range[1];
 		int remaining = (high - low) + 1;
...
@@ -216,8 +207,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		spin_lock(&tcp_portalloc_lock);
 		rover = tcp_port_rover;
-		do { rover++;
-			if ((rover < low) || (rover > high))
+		do {
+			rover++;
+			if (rover < low || rover > high)
 				rover = low;
 			head = &tcp_bhash[tcp_bhashfn(rover)];
 			spin_lock(&head->lock);
...
@@ -244,14 +236,14 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 	} else {
 		head = &tcp_bhash[tcp_bhashfn(snum)];
 		spin_lock(&head->lock);
-		for (tb = head->chain; tb != NULL; tb = tb->next)
+		for (tb = head->chain; tb; tb = tb->next)
 			if (tb->port == snum)
 				break;
 	}
-	if (tb != NULL && tb->owners != NULL) {
+	if (tb && tb->owners) {
 		if (sk->reuse > 1)
 			goto success;
-		if (tb->fastreuse > 0 && sk->reuse != 0 && sk->state != TCP_LISTEN) {
+		if (tb->fastreuse > 0 && sk->reuse && sk->state != TCP_LISTEN) {
 			goto success;
 		} else {
 			ret = 1;
...
@@ -260,21 +252,19 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 		}
 	}
 	ret = 1;
-	if (tb == NULL &&
-	    (tb = tcp_bucket_create(head, snum)) == NULL)
+	if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
 		goto fail_unlock;
-	if (tb->owners == NULL) {
+	if (!tb->owners) {
 		if (sk->reuse && sk->state != TCP_LISTEN)
 			tb->fastreuse = 1;
 		else
 			tb->fastreuse = 0;
-	} else if (tb->fastreuse &&
-		   ((sk->reuse == 0) || (sk->state == TCP_LISTEN)))
+	} else if (tb->fastreuse &&
+		   (!sk->reuse || sk->state == TCP_LISTEN))
 		tb->fastreuse = 0;
 success:
-	if (sk->prev == NULL)
+	if (!sk->prev)
 		tcp_bind_hash(sk, tb, snum);
 	BUG_TRAP(sk->prev == (struct sock *)tb);
 	ret = 0;

 fail_unlock:
...
@@ -300,7 +290,7 @@ __inline__ void __tcp_put_port(struct sock *sk)
 	*(sk->bind_pprev) = sk->bind_next;
 	sk->prev = NULL;
 	inet->num = 0;
-	if (tb->owners == NULL) {
+	if (!tb->owners) {
 		if (tb->next)
 			tb->next->pprev = tb->pprev;
 		*(tb->pprev) = tb->next;
...
@@ -333,7 +323,7 @@ void tcp_listen_wlock(void)
 		add_wait_queue_exclusive(&tcp_lhash_wait, &wait);
 		for (;;) {
 			set_current_state(TASK_UNINTERRUPTIBLE);
-			if (atomic_read(&tcp_lhash_users) == 0)
+			if (!atomic_read(&tcp_lhash_users))
 				break;
 			write_unlock_bh(&tcp_lhash_lock);
 			schedule();
...
@@ -350,8 +340,8 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
 	struct sock **skp;
 	rwlock_t *lock;
 
-	BUG_TRAP(sk->pprev == NULL);
+	BUG_TRAP(!sk->pprev);
 	if (listen_possible && sk->state == TCP_LISTEN) {
 		skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
 		lock = &tcp_lhash_lock;
 		tcp_listen_wlock();
...
@@ -360,7 +350,7 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
 		lock = &tcp_ehash[sk->hashent].lock;
 		write_lock(lock);
 	}
 	if ((sk->next = *skp) != NULL)
 		(*skp)->pprev = &sk->next;
 	*skp = sk;
 	sk->pprev = skp;
...
@@ -396,8 +386,8 @@ void tcp_unhash(struct sock *sk)
 		write_lock_bh(&head->lock);
 	}
 
 	if (sk->pprev) {
 		if (sk->next)
 			sk->next->pprev = sk->pprev;
 		*sk->pprev = sk->next;
 		sk->pprev = NULL;
...
@@ -416,20 +406,21 @@ void tcp_unhash(struct sock *sk)
  * connection.  So always assume those are both wildcarded
  * during the search since they can never be otherwise.
  */
 static struct sock *__tcp_v4_lookup_listener(struct sock *sk, u32 daddr, unsigned short hnum, int dif)
 {
 	struct sock *result = NULL;
 	int score, hiscore;
 
 	hiscore = 0;
 	for (; sk; sk = sk->next) {
 		struct inet_opt *inet = inet_sk(sk);
 
 		if (inet->num == hnum) {
 			__u32 rcv_saddr = inet->rcv_saddr;
 
 			score = 1;
 			if (rcv_saddr) {
 				if (rcv_saddr != daddr)
 					continue;
 				score++;
...
@@ -451,7 +442,8 @@ static struct sock *__tcp_v4_lookup_listener(struct sock *sk, u32 daddr, unsigne
 }
 
 /* Optimize the common listener case. */
 __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif)
 {
 	struct sock *sk;
...
@@ -460,8 +452,7 @@ __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, i
 	if (sk) {
 		struct inet_opt *inet = inet_sk(sk);
 
-		if (inet->num == hnum &&
-		    sk->next == NULL &&
+		if (inet->num == hnum && !sk->next &&
 		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
 		    !sk->bound_dev_if)
 			goto sherry_cache;
...
@@ -482,53 +473,47 @@ __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, i
  */
 static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
 							u32 daddr, u16 hnum, int dif)
 {
 	struct tcp_ehash_bucket *head;
 	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
 	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
 	struct sock *sk;
-	int hash;
 
 	/* Optimize here for direct hit, only listening connections can
 	 * have wildcards anyways.
 	 */
-	hash = tcp_hashfn(daddr, hnum, saddr, sport);
+	int hash = tcp_hashfn(daddr, hnum, saddr, sport);
 	head = &tcp_ehash[hash];
 	read_lock(&head->lock);
 	for (sk = head->chain; sk; sk = sk->next) {
 		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
 			goto hit; /* You sunk my battleship! */
 	}
 	/* Must check for a TIME_WAIT'er before going to listener hash. */
 	for (sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next)
 		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
 			goto hit;
-out:
 	read_unlock(&head->lock);
-	return sk;
+	return NULL;
 hit:
 	sock_hold(sk);
-	goto out;
+	read_unlock(&head->lock);
+	return sk;
 }
 
 static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
 					   u32 daddr, u16 hnum, int dif)
 {
-	struct sock *sk;
-
-	sk = __tcp_v4_lookup_established(saddr, sport, daddr, hnum, dif);
-	if (sk)
-		return sk;
-	return tcp_v4_lookup_listener(daddr, hnum, dif);
+	struct sock *sk = __tcp_v4_lookup_established(saddr, sport, daddr, hnum, dif);
+
+	return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
 }
 
 __inline__ struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif)
 {
 	struct sock *sk;
...
@@ -565,11 +550,11 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	write_lock(&head->lock);
 
 	/* Check TIME-WAIT sockets first. */
 	for (skp = &(head + tcp_ehash_size)->chain; (sk2 = *skp) != NULL;
 	     skp = &sk2->next) {
 		tw = (struct tcp_tw_bucket *)sk2;
 
 		if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
 			struct tcp_opt *tp = tcp_sk(sk);
 
 			/* With PAWS, it is safe from the viewpoint
...
@@ -588,7 +573,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 			 */
 			if (tw->ts_recent_stamp &&
 			    (!twp || (sysctl_tcp_tw_reuse &&
 				      xtime.tv_sec - tw->ts_recent_stamp > 1))) {
 				if ((tp->write_seq =
 						tw->snd_nxt + 65535 + 2) == 0)
 					tp->write_seq = 1;
...
@@ -604,8 +590,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	tw = NULL;
 
 	/* And established part... */
-	for (skp = &head->chain; (sk2 =*skp) != NULL; skp = &sk2->next) {
+	for (skp = &head->chain; (sk2 = *skp) != NULL; skp = &sk2->next) {
 		if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
 			goto not_unique;
 	}
...
@@ -614,7 +600,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
 	 * in hash table socket with a funny identity. */
 	inet->num = lport;
 	inet->sport = htons(lport);
-	BUG_TRAP(sk->pprev == NULL);
+	BUG_TRAP(!sk->pprev);
 	if ((sk->next = *skp) != NULL)
 		(*skp)->pprev = &sk->next;
...
@@ -651,8 +637,9 @@ static int tcp_v4_hash_connect(struct sock *sk)
 	unsigned short snum = inet_sk(sk)->num;
 	struct tcp_bind_hashbucket *head;
 	struct tcp_bind_bucket *tb;
+	int ret;
 
-	if (snum == 0) {
+	if (!snum) {
 		int rover;
 		int low = sysctl_local_port_range[0];
 		int high = sysctl_local_port_range[1];
...
@@ -690,10 +677,12 @@ static int tcp_v4_hash_connect(struct sock *sk)
 		 */
 		for (tb = head->chain; tb; tb = tb->next) {
 			if (tb->port == rover) {
-				BUG_TRAP(tb->owners != NULL);
+				BUG_TRAP(tb->owners);
 				if (tb->fastreuse >= 0)
 					goto next_port;
 				if (!__tcp_v4_check_established(sk, rover, &tw))
 					goto ok;
 				goto next_port;
 			}
...
@@ -717,7 +706,7 @@ static int tcp_v4_hash_connect(struct sock *sk)
 		return -EADDRNOTAVAIL;
 
 ok:
 		/* All locks still held and bhs disabled */
 		tcp_port_rover = rover;
 		spin_unlock(&tcp_portalloc_lock);
...
@@ -735,22 +724,22 @@ static int tcp_v4_hash_connect(struct sock *sk)
 			tcp_tw_put(tw);
 		}
 
-		local_bh_enable();
-		return 0;
+		ret = 0;
+		goto out;
 	}
 
 	head = &tcp_bhash[tcp_bhashfn(snum)];
 	tb = (struct tcp_bind_bucket *)sk->prev;
 	spin_lock_bh(&head->lock);
-	if (tb->owners == sk && sk->bind_next == NULL) {
+	if (tb->owners == sk && !sk->bind_next) {
 		__tcp_v4_hash(sk, 0);
 		spin_unlock_bh(&head->lock);
 		return 0;
 	} else {
-		int ret;
 		spin_unlock(&head->lock);
 		/* No definite answer... Walk to established hash table */
 		ret = __tcp_v4_check_established(sk, snum, NULL);
+out:
 		local_bh_enable();
 		return ret;
 	}
...
@@ -761,21 +750,21 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct inet_opt *inet = inet_sk(sk);
 	struct tcp_opt *tp = tcp_sk(sk);
 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 	struct rtable *rt;
 	u32 daddr, nexthop;
 	int tmp;
 	int err;
 
 	if (addr_len < sizeof(struct sockaddr_in))
-		return(-EINVAL);
+		return -EINVAL;
 
 	if (usin->sin_family != AF_INET)
-		return(-EAFNOSUPPORT);
+		return -EAFNOSUPPORT;
 
 	nexthop = daddr = usin->sin_addr.s_addr;
 	if (inet->opt && inet->opt->srr) {
-		if (daddr == 0)
+		if (!daddr)
 			return -EINVAL;
 		nexthop = inet->opt->faddr;
 	}
...
@@ -785,7 +774,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (tmp < 0)
 		return tmp;
 
 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
 		ip_rt_put(rt);
 		return -ENETUNREACH;
 	}
...
@@ -808,8 +797,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	}
 
 	if (sysctl_tcp_tw_recycle &&
 	    !tp->ts_recent_stamp &&
 	    rt->rt_dst == daddr) {
 		struct inet_peer *peer = rt_get_peer(rt);
 
 		/* VJ's idea. We save last timestamp seen from
...
@@ -866,15 +854,15 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 static __inline__ int tcp_v4_iif(struct sk_buff *skb)
 {
 	return ((struct rtable *)skb->dst)->rt_iif;
 }
 
 static __inline__ unsigned tcp_v4_synq_hash(u32 raddr, u16 rport)
 {
 	unsigned h = raddr ^ rport;
 	h ^= h >> 16;
 	h ^= h >> 8;
 	return h & (TCP_SYNQ_HSIZE - 1);
 }
 
 static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
...
@@ -892,13 +880,13 @@ static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
 		    req->af.v4_req.rmt_addr == raddr &&
 		    req->af.v4_req.loc_addr == laddr &&
 		    TCP_INET_FAMILY(req->class->family)) {
-			BUG_TRAP(req->sk == NULL);
+			BUG_TRAP(!req->sk);
 			*prevp = prev;
-			return req;
+			break;
 		}
 	}
-	return NULL;
+	return req;
 }
 
 static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
...
@@ -985,8 +973,8 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
 void tcp_v4_err(struct sk_buff *skb, u32 info)
 {
 	struct iphdr *iph = (struct iphdr *)skb->data;
 	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
 	struct tcp_opt *tp;
 	struct inet_opt *inet;
 	int type = skb->h.icmph->type;
...
@@ -1000,13 +988,14 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		return;
 	}
 
-	sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr, th->source, tcp_v4_iif(skb));
-	if (sk == NULL) {
+	sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
+			   th->source, tcp_v4_iif(skb));
+	if (!sk) {
 		ICMP_INC_STATS_BH(IcmpInErrors);
 		return;
 	}
 	if (sk->state == TCP_TIME_WAIT) {
 		tcp_tw_put((struct tcp_tw_bucket *)sk);
 		return;
 	}
...
@@ -1014,7 +1003,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	/* If too many ICMPs get dropped on busy
 	 * servers this needs to be solved differently.
 	 */
-	if (sk->lock.users != 0)
+	if (sk->lock.users)
 		NET_INC_STATS_BH(LockDroppedIcmps);
 
 	if (sk->state == TCP_CLOSE)
...
@@ -1033,7 +1022,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		/* This is deprecated, but if someone generated it,
 		 * we have no reasons to ignore it.
 		 */
-		if (sk->lock.users == 0)
+		if (!sk->lock.users)
 			tcp_enter_cwr(tp);
 		goto out;
 	case ICMP_PARAMETERPROB:
...
@@ -1044,7 +1033,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 			goto out;
 
 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
-			if (sk->lock.users == 0)
+			if (!sk->lock.users)
 				do_pmtu_discovery(sk, iph, info);
 			goto out;
 		}
...
@@ -1061,11 +1050,10 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	switch (sk->state) {
 		struct open_request *req, **prev;
 	case TCP_LISTEN:
-		if (sk->lock.users != 0)
+		if (sk->lock.users)
 			goto out;
 
 		req = tcp_v4_search_req(tp, &prev,
 					th->dest,
 					iph->daddr, iph->saddr);
 		if (!req)
 			goto out;
...
@@ -1073,7 +1061,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 		/* ICMPs are not backlogged, hence we cannot get
 		   an established socket here.
 		 */
-		BUG_TRAP(req->sk == NULL);
+		BUG_TRAP(!req->sk);
 
 		if (seq != req->snt_isn) {
 			NET_INC_STATS_BH(OutOfWindowIcmps);
...
@@ -1093,7 +1081,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	case TCP_SYN_RECV:  /* Cannot happen.
 			       It can f.e. if SYNs crossed.
 			     */
-		if (sk->lock.users == 0) {
+		if (!sk->lock.users) {
 			TCP_INC_STATS_BH(TcpAttemptFails);
 			sk->err = err;
...
@@ -1123,7 +1111,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
 	 */
 
 	inet = inet_sk(sk);
-	if (sk->lock.users == 0 && inet->recverr) {
+	if (!sk->lock.users && inet->recverr) {
 		sk->err = err;
 		sk->error_report(sk);
 	} else	{ /* Only an error on timeout */
...
@@ -1146,7 +1134,9 @@ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
 		skb->csum = offsetof(struct tcphdr, check);
 	} else {
 		th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
 					 csum_partial((char *)th, th->doff << 2, skb->csum));
 	}
 }
...
@@ -1173,22 +1163,22 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
 	if (th->rst)
 		return;
 
 	if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
 		return;
 
 	/* Swap the send and the receive. */
 	memset(&rth, 0, sizeof(struct tcphdr));
 	rth.dest = th->source;
 	rth.source = th->dest;
 	rth.doff = sizeof(struct tcphdr) / 4;
 	rth.rst = 1;
 
 	if (th->ack) {
 		rth.seq = th->ack_seq;
 	} else {
 		rth.ack = 1;
-		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
-				    + skb->len - (th->doff << 2));
+		rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
+				    skb->len - (th->doff << 2));
 	}
 
 	memset(&arg, 0, sizeof arg);
...
@@ -1196,9 +1186,7 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
 	arg.iov[0].iov_len = sizeof rth;
 	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
 				      skb->nh.iph->saddr, /*XXX*/
 				      sizeof(struct tcphdr),
 				      IPPROTO_TCP, 0);
 	arg.n_iov = 1;
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
...
@@ -1213,7 +1201,8 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
    outside socket context is ugly, certainly. What can I do?
  */
 
 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
 {
 	struct tcphdr *th = skb->h.th;
 	struct {
...
@@ -1229,8 +1218,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 	arg.iov[0].iov_len = sizeof(rep.th);
 	arg.n_iov = 1;
 	if (ts) {
-		rep.tsopt[0] = __constant_htonl((TCPOPT_NOP << 24) |
+		rep.tsopt[0] = htonl((TCPOPT_NOP << 24) |
 				     (TCPOPT_NOP << 16) |
 				     (TCPOPT_TIMESTAMP << 8) |
 				     TCPOLEN_TIMESTAMP);
 		rep.tsopt[1] = htonl(tcp_time_stamp);
...
@@ -1241,7 +1229,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 	/* Swap the send and the receive. */
 	rep.th.dest = th->source;
 	rep.th.source = th->dest;
 	rep.th.doff = arg.iov[0].iov_len / 4;
 	rep.th.seq = htonl(seq);
 	rep.th.ack_seq = htonl(ack);
 	rep.th.ack = 1;
...
@@ -1249,9 +1237,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
 	arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
 				      skb->nh.iph->saddr, /*XXX*/
 				      arg.iov[0].iov_len,
 				      IPPROTO_TCP, 0);
 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 
 	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
...
@@ -1264,25 +1250,24 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
 
 	tcp_v4_send_ack(skb, tw->snd_nxt, tw->rcv_nxt,
 			tw->rcv_wnd >> tw->rcv_wscale, tw->ts_recent);
 
 	tcp_tw_put(tw);
 }
 
 static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
 {
 	tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
 			req->ts_recent);
 }
 
 static struct dst_entry* tcp_v4_route_req(struct sock *sk, struct open_request *req)
 {
 	struct rtable *rt;
-	struct ip_options *opt;
+	struct ip_options *opt = req->af.v4_req.opt;
 
-	opt = req->af.v4_req.opt;
 	if (ip_route_output(&rt, ((opt && opt->srr) ? opt->faddr :
 				  req->af.v4_req.rmt_addr),
 			    req->af.v4_req.loc_addr,
 			    RT_CONN_FLAGS(sk), sk->bound_dev_if)) {
...
@@ -1309,8 +1294,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
 	struct sk_buff * skb;
 
 	/* First, grab a route. */
-	if (dst == NULL &&
+	if (!dst &&
 	    (dst = tcp_v4_route_req(sk, req)) == NULL)
 		goto out;
 
 	skb = tcp_make_synack(sk, dst, req);
...
@@ -1319,11 +1303,14 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
 		struct tcphdr *th = skb->h.th;
 
 		th->check = tcp_v4_check(th, skb->len,
-					 req->af.v4_req.loc_addr, req->af.v4_req.rmt_addr,
+					 req->af.v4_req.loc_addr,
+					 req->af.v4_req.rmt_addr,
 					 csum_partial((char *)th, skb->len, skb->csum));
 		err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
 					    req->af.v4_req.rmt_addr, req->af.v4_req.opt);
 		if (err == NET_XMIT_CN)
 			err = 0;
 	}
...
@@ -1346,7 +1333,7 @@ static inline void syn_flood_warning(struct sk_buff *skb)
 {
 	static unsigned long warntime;
 
 	if (jiffies - warntime > HZ * 60) {
 		warntime = jiffies;
 		printk(KERN_INFO
 		       "possible SYN flooding on port %d. Sending cookies.\n",
...
@@ -1357,8 +1344,8 @@ static inline void syn_flood_warning(struct sk_buff *skb)
 /*
  * Save and compile IPv4 options into the open_request if needed.
  */
 static inline struct ip_options *
-tcp_v4_save_options(struct sock *sk,
-		    struct sk_buff *skb)
+tcp_v4_save_options(struct sock *sk, struct sk_buff *skb)
 {
 	struct ip_options *opt = &(IPCB(skb)->opt);
 	struct ip_options *dopt = NULL;
...
@@ -1392,11 +1379,11 @@ tcp_v4_save_options(struct sock *sk, struct sk_buff *skb)
 int sysctl_max_syn_backlog = 256;
 
 struct or_calltable or_ipv4 = {
-	PF_INET,
-	tcp_v4_send_synack,
-	tcp_v4_or_send_ack,
-	tcp_v4_or_free,
-	tcp_v4_send_reset
+	family:		PF_INET,
+	rtx_syn_ack:	tcp_v4_send_synack,
+	send_ack:	tcp_v4_or_send_ack,
+	destructor:	tcp_v4_or_free,
+	send_reset:	tcp_v4_send_reset,
 };
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
...
@@ -1415,7 +1402,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (((struct rtable *)skb->dst)->rt_flags &
 	    (RTCF_BROADCAST | RTCF_MULTICAST))
 		goto drop;
 
 	/* TW buckets are converted to open requests without
...
@@ -1440,7 +1427,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
 	req = tcp_openreq_alloc();
-	if (req == NULL)
+	if (!req)
 		goto drop;
 
 	tcp_clear_options(&tp);
...
@@ -1454,7 +1441,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		tp.saw_tstamp = 0;
 	}
 
-	if (tp.saw_tstamp && tp.rcv_tsval == 0) {
+	if (tp.saw_tstamp && !tp.rcv_tsval) {
 		/* Some OSes (unknown ones, but I see them on web server, which
 		 * contains information interesting only for windows'
 		 * users) do not send their stamp in SYN. It is easy case.
...
@@ -1479,7 +1466,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			syn_flood_warning(skb);
 #endif
 		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
-	} else if (isn == 0) {
+	} else if (!isn) {
 		struct inet_peer *peer = NULL;
 
 		/* VJ's idea. We save last timestamp seen
...
@@ -1494,10 +1481,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		if (tp.saw_tstamp &&
 		    sysctl_tcp_tw_recycle &&
 		    (dst = tcp_v4_route_req(sk, req)) != NULL &&
 		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
 		    peer->v4daddr == saddr) {
 			if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
 			    (s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) {
 				NET_INC_STATS_BH(PAWSPassiveRejected);
 				dst_release(dst);
 				goto drop_and_free;
...
@@ -1505,19 +1493,23 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		}
 		/* Kill the following clause, if you dislike this way. */
 		else if (!sysctl_tcp_syncookies &&
 			 (sysctl_max_syn_backlog - tcp_synq_len(sk)
 			  < (sysctl_max_syn_backlog >> 2)) &&
 			 (!peer || !peer->tcp_ts_stamp) &&
 			 (!dst || !dst->rtt)) {
 			/* Without syncookies last quarter of
-			 * backlog is filled with destinations, proven to be alive.
+			 * backlog is filled with destinations,
+			 * proven to be alive.
 			 * It means that we continue to communicate
 			 * to destinations, already remembered
 			 * to the moment of synflood.
 			 */
 			NETDEBUG(if (net_ratelimit()) \
-				printk(KERN_DEBUG "TCP: drop open request from %u.%u.%u.%u/%u\n", \
-					NIPQUAD(saddr), ntohs(skb->h.th->source)));
+					printk(KERN_DEBUG "TCP: drop open "
+					       "request from %u.%u."
+					       "%u.%u/%u\n", \
+					       NIPQUAD(saddr),
+					       ntohs(skb->h.th->source)));
 			dst_release(dst);
 			goto drop_and_free;
 		}
...
@@ -1548,7 +1540,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
  * The three way handshake has completed - we got a valid synack -
  * now create the new socket.
  */
 struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 				   struct open_request *req,
 				   struct dst_entry *dst)
 {
...
@@ -1559,8 +1551,7 @@ struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	if (tcp_acceptq_is_full(sk))
 		goto exit_overflow;
 
-	if (dst == NULL &&
+	if (!dst &&
 	    (dst = tcp_v4_route_req(sk, req)) == NULL)
 		goto exit;
 
 	newsk = tcp_create_openreq_child(sk, req, skb);
...
@@ -1601,17 +1592,15 @@ struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	return NULL;
 }
 
 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 {
-	struct open_request *req, **prev;
 	struct tcphdr *th = skb->h.th;
 	struct iphdr *iph = skb->nh.iph;
 	struct tcp_opt *tp = tcp_sk(sk);
 	struct sock *nsk;
+	struct open_request **prev;
 	/* Find possible connection requests. */
-	req = tcp_v4_search_req(tp, &prev,
-				th->source,
-				iph->saddr, iph->daddr);
+	struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
+						     iph->saddr, iph->daddr);
 	if (req)
 		return tcp_check_req(sk, skb, req, prev);
...
@@ -1627,7 +1616,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
 			bh_lock_sock(nsk);
 			return nsk;
 		}
 		tcp_tw_put((struct tcp_tw_bucket *)nsk);
 		return NULL;
 	}
...
@@ -1642,22 +1631,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_HW) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
 				  skb->nh.iph->daddr, skb->csum))
 			return 0;
 		NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
 		skb->ip_summed = CHECKSUM_NONE;
 	}
 	if (skb->len <= 76) {
 		if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
 				 skb->nh.iph->daddr,
 				 skb_checksum(skb, 0, skb->len, 0)))
 			return -1;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else {
-		skb->csum = ~tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
-					  skb->nh.iph->daddr, 0);
+		skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
+					  skb->nh.iph->saddr, skb->nh.iph->daddr, 0);
 	}
 	return 0;
 }
...
@@ -1689,7 +1680,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
 		return 0;
 	}
 
 	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
 		goto csum_err;
 
 	if (sk->state == TCP_LISTEN) {
...
@@ -1736,7 +1727,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	struct sock *sk;
 	int ret;
 
 	if (skb->pkt_type != PACKET_HOST)
 		goto discard_it;
 
 	/* Count it even if it's bad */
...
@@ -1747,9 +1738,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	th = skb->h.th;
 
 	if (th->doff < sizeof(struct tcphdr) / 4)
 		goto bad_packet;
 	if (!pskb_may_pull(skb, th->doff * 4))
 		goto discard_it;
 
 	/* An explanation is required here, I think.
...
@@ -1763,20 +1754,21 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	th = skb->h.th;
 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 				    skb->len - th->doff * 4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
 	TCP_SKB_CB(skb)->when = 0;
 	TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
 	TCP_SKB_CB(skb)->sacked = 0;
 
 	sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
 			     skb->nh.iph->daddr, ntohs(th->dest), tcp_v4_iif(skb));
 
 	if (!sk)
 		goto no_tcp_socket;
 
process:
 	if (!ipsec_sk_policy(sk, skb))
 		goto discard_and_relse;
 
 	if (sk->state == TCP_TIME_WAIT)
...
@@ -1798,7 +1790,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	return ret;
 
no_tcp_socket:
 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
 		TCP_INC_STATS_BH(TcpInErrs);
 	} else {
...
@@ -1815,18 +1807,17 @@ int tcp_v4_rcv(struct sk_buff *skb)
 	goto discard_it;
 
do_time_wait:
 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
 		TCP_INC_STATS_BH(TcpInErrs);
 		goto discard_and_relse;
 	}
 	switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
 					   skb, th, skb->len)) {
 	case TCP_TW_SYN:
 	{
-		struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
-							  ntohs(th->dest), tcp_v4_iif(skb));
-		if (sk2) {
+		struct sock *sk2;
+
+		sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
+					     ntohs(th->dest), tcp_v4_iif(skb));
+		if (sk2 != NULL) {
 			tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
 			tcp_timewait_kill((struct tcp_tw_bucket *)sk);
 			tcp_tw_put((struct tcp_tw_bucket *)sk);
...
@@ -1910,7 +1901,7 @@ int tcp_v4_rebuild_header(struct sock *sk)
 	int err;
 
 	/* Route is OK, nothing to do. */
-	if (rt != NULL)
+	if (rt)
 		return 0;
 
 	/* Reroute. */
...
@@ -1958,15 +1949,15 @@ int tcp_v4_remember_stamp(struct sock *sk)
 {
 	struct inet_opt *inet = inet_sk(sk);
 	struct tcp_opt *tp = tcp_sk(sk);
 	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
 	struct inet_peer *peer = NULL;
 	int release_it = 0;
 
-	if (rt == NULL || rt->rt_dst != inet->daddr) {
+	if (!rt || rt->rt_dst != inet->daddr) {
 		peer = inet_getpeer(inet->daddr, 1);
 		release_it = 1;
 	} else {
-		if (rt->peer == NULL)
+		if (!rt->peer)
 			rt_bind_peer(rt, 1);
 		peer = rt->peer;
 	}
...
@@ -2007,18 +1998,17 @@ int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
 }
 
 struct tcp_func ipv4_specific = {
-	ip_queue_xmit,
-	tcp_v4_send_check,
-	tcp_v4_rebuild_header,
-	tcp_v4_conn_request,
-	tcp_v4_syn_recv_sock,
-	tcp_v4_remember_stamp,
-	sizeof(struct iphdr),
-
-	ip_setsockopt,
-	ip_getsockopt,
-	v4_addr2sockaddr,
-	sizeof(struct sockaddr_in)
+	queue_xmit:	ip_queue_xmit,
+	send_check:	tcp_v4_send_check,
+	rebuild_header:	tcp_v4_rebuild_header,
+	conn_request:	tcp_v4_conn_request,
+	syn_recv_sock:	tcp_v4_syn_recv_sock,
+	remember_stamp:	tcp_v4_remember_stamp,
+	net_header_len:	sizeof(struct iphdr),
+	setsockopt:	ip_setsockopt,
+	getsockopt:	ip_getsockopt,
+	addr2sockaddr:	v4_addr2sockaddr,
+	sockaddr_len:	sizeof(struct sockaddr_in),
 };
 
 /* NOTE: A lot of things set to zero explicitly by call to
...
@@ -2082,7 +2072,7 @@ static int tcp_v4_destroy_sock(struct sock *sk)
 	__skb_queue_purge(&tp->ucopy.prequeue);
 
 	/* Clean up a referenced TCP bind bucket. */
-	if (sk->prev != NULL)
+	if (sk->prev)
 		tcp_put_port(sk);
 
 	/* If sendmsg cached page exists, toss it. */
...
@@ -2095,7 +2085,8 @@ static int tcp_v4_destroy_sock(struct sock *sk)
 }
 
 /* Proc filesystem TCP sock list dumping. */
 static void get_openreq(struct sock *sk, struct open_request *req, char *tmpbuf, int i, int uid)
 {
 	int ttd = req->expires - jiffies;
...
@@ -2107,7 +2098,7 @@ static void get_openreq(struct sock *sk, struct open_request *req, char *tmpbuf,
 		req->af.v4_req.rmt_addr,
 		ntohs(req->rmt_port),
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
 		1,   /* timers active (only the expire timer) */
 		ttd,
 		req->retrans,
...
@@ -2115,23 +2106,20 @@ static void get_openreq(struct sock *sk, struct open_request *req, char *tmpbuf,
 		0,  /* non standard timer */
 		0, /* open_requests have no inode */
 		atomic_read(&sk->refcnt),
 		req
 		);
 }
 
 static void get_tcp_sock(struct sock *sp, char *tmpbuf, int i)
 {
-	unsigned int dest, src;
-	__u16 destp, srcp;
 	int timer_active;
 	unsigned long timer_expires;
 	struct tcp_opt *tp = tcp_sk(sp);
 	struct inet_opt *inet = inet_sk(sp);
+	unsigned int dest = inet->daddr;
+	unsigned int src = inet->rcv_saddr;
+	__u16 destp = ntohs(inet->dport);
+	__u16 srcp = ntohs(inet->sport);
 
-	dest = inet->daddr;
-	src = inet->rcv_saddr;
-	destp = ntohs(inet->dport);
-	srcp = ntohs(inet->sport);
 	if (tp->pending == TCP_TIME_RETRANS) {
...
@@ -2146,19 +2134,19 @@ static void get_tcp_sock(struct sock *sp, char *tmpbuf, int i)
 		timer_expires = jiffies;
 	}
 
-	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d",
+	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
+		"%08X %5d %8d %lu %d %p %u %u %u %u %d",
 		i, src, srcp, dest, destp, sp->state,
 		tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
 		timer_active, timer_expires - jiffies,
 		tp->retransmits,
 		sock_i_uid(sp),
 		tp->probes_out,
 		sock_i_ino(sp),
 		atomic_read(&sp->refcnt), sp,
 		tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
-		tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ?-1 : tp->snd_ssthresh
-		);
+		tp->snd_cwnd,
+		tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
 }
 
 static void get_timewait_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
...
@@ -2188,18 +2176,19 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
...
@@ -2188,18 +2176,19 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
{
{
int
len
=
0
,
num
=
0
,
i
;
int
len
=
0
,
num
=
0
,
i
;
off_t
begin
,
pos
=
0
;
off_t
begin
,
pos
=
0
;
char
tmpbuf
[
TMPSZ
+
1
];
char
tmpbuf
[
TMPSZ
+
1
];
if
(
offset
<
TMPSZ
)
if
(
offset
<
TMPSZ
)
len
+=
sprintf
(
buffer
,
"%-*s
\n
"
,
TMPSZ
-
1
,
len
+=
sprintf
(
buffer
,
"%-*s
\n
"
,
TMPSZ
-
1
,
" sl local_address rem_address st tx_queue "
" sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout inode"
);
"rx_queue tr tm->when retrnsmt uid timeout "
"inode"
);
pos
=
TMPSZ
;
pos
=
TMPSZ
;
/* First, walk listening socket table. */
/* First, walk listening socket table. */
tcp_listen_lock
();
tcp_listen_lock
();
for
(
i
=
0
;
i
<
TCP_LHTABLE_SIZE
;
i
++
)
{
for
(
i
=
0
;
i
<
TCP_LHTABLE_SIZE
;
i
++
)
{
struct
sock
*
sk
;
struct
sock
*
sk
;
struct
tcp_listen_opt
*
lopt
;
struct
tcp_listen_opt
*
lopt
;
int
k
;
int
k
;
...
@@ -2215,7 +2204,8 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
...
@@ -2215,7 +2204,8 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
pos
+=
TMPSZ
;
pos
+=
TMPSZ
;
if
(
pos
>=
offset
)
{
if
(
pos
>=
offset
)
{
get_tcp_sock
(
sk
,
tmpbuf
,
num
);
get_tcp_sock
(
sk
,
tmpbuf
,
num
);
len
+=
sprintf
(
buffer
+
len
,
"%-*s
\n
"
,
TMPSZ
-
1
,
tmpbuf
);
len
+=
sprintf
(
buffer
+
len
,
"%-*s
\n
"
,
TMPSZ
-
1
,
tmpbuf
);
if
(
pos
>=
offset
+
length
)
{
if
(
pos
>=
offset
+
length
)
{
tcp_listen_unlock
();
tcp_listen_unlock
();
goto
out_no_bh
;
goto
out_no_bh
;
...
@@ -2226,17 +2216,22 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
...
@@ -2226,17 +2216,22 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
uid
=
sock_i_uid
(
sk
);
uid
=
sock_i_uid
(
sk
);
read_lock_bh
(
&
tp
->
syn_wait_lock
);
read_lock_bh
(
&
tp
->
syn_wait_lock
);
lopt
=
tp
->
listen_opt
;
lopt
=
tp
->
listen_opt
;
if
(
lopt
&&
lopt
->
qlen
!=
0
)
{
if
(
lopt
&&
lopt
->
qlen
)
{
for
(
k
=
0
;
k
<
TCP_SYNQ_HSIZE
;
k
++
)
{
for
(
k
=
0
;
k
<
TCP_SYNQ_HSIZE
;
k
++
)
{
for
(
req
=
lopt
->
syn_table
[
k
];
req
;
req
=
req
->
dl_next
,
num
++
)
{
for
(
req
=
lopt
->
syn_table
[
k
];
req
;
req
=
req
->
dl_next
,
num
++
)
{
if
(
!
TCP_INET_FAMILY
(
req
->
class
->
family
))
if
(
!
TCP_INET_FAMILY
(
req
->
class
->
family
))
continue
;
continue
;
pos
+=
TMPSZ
;
pos
+=
TMPSZ
;
if
(
pos
<=
offset
)
if
(
pos
<=
offset
)
continue
;
continue
;
get_openreq
(
sk
,
req
,
tmpbuf
,
num
,
uid
);
get_openreq
(
sk
,
req
,
tmpbuf
,
len
+=
sprintf
(
buffer
+
len
,
"%-*s
\n
"
,
TMPSZ
-
1
,
tmpbuf
);
num
,
uid
);
len
+=
sprintf
(
buffer
+
len
,
"%-*s
\n
"
,
TMPSZ
-
1
,
tmpbuf
);
if
(
pos
>=
offset
+
length
)
{
if
(
pos
>=
offset
+
length
)
{
read_unlock_bh
(
&
tp
->
syn_wait_lock
);
read_unlock_bh
(
&
tp
->
syn_wait_lock
);
tcp_listen_unlock
();
tcp_listen_unlock
();
...
@@ -2261,21 +2256,23 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
...
@@ -2261,21 +2256,23 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
struct
tcp_tw_bucket
*
tw
;
struct
tcp_tw_bucket
*
tw
;
read_lock
(
&
head
->
lock
);
read_lock
(
&
head
->
lock
);
for
(
sk
=
head
->
chain
;
sk
;
sk
=
sk
->
next
,
num
++
)
{
for
(
sk
=
head
->
chain
;
sk
;
sk
=
sk
->
next
,
num
++
)
{
if
(
!
TCP_INET_FAMILY
(
sk
->
family
))
if
(
!
TCP_INET_FAMILY
(
sk
->
family
))
continue
;
continue
;
pos
+=
TMPSZ
;
pos
+=
TMPSZ
;
if
(
pos
<=
offset
)
if
(
pos
<=
offset
)
continue
;
continue
;
get_tcp_sock
(
sk
,
tmpbuf
,
num
);
get_tcp_sock
(
sk
,
tmpbuf
,
num
);
len
+=
sprintf
(
buffer
+
len
,
"%-*s
\n
"
,
TMPSZ
-
1
,
tmpbuf
);
len
+=
sprintf
(
buffer
+
len
,
"%-*s
\n
"
,
TMPSZ
-
1
,
tmpbuf
);
if
(
pos
>=
offset
+
length
)
{
if
(
pos
>=
offset
+
length
)
{
read_unlock
(
&
head
->
lock
);
read_unlock
(
&
head
->
lock
);
goto
out
;
goto
out
;
}
}
}
}
for
(
tw
=
(
struct
tcp_tw_bucket
*
)
tcp_ehash
[
i
+
tcp_ehash_size
].
chain
;
for
(
tw
=
(
struct
tcp_tw_bucket
*
)
tcp_ehash
[
i
+
tw
!=
NULL
;
tcp_ehash_size
].
chain
;
tw
;
tw
=
(
struct
tcp_tw_bucket
*
)
tw
->
next
,
num
++
)
{
tw
=
(
struct
tcp_tw_bucket
*
)
tw
->
next
,
num
++
)
{
if
(
!
TCP_INET_FAMILY
(
tw
->
family
))
if
(
!
TCP_INET_FAMILY
(
tw
->
family
))
continue
;
continue
;
...
@@ -2283,7 +2280,8 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
...
@@ -2283,7 +2280,8 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
if
(
pos
<=
offset
)
if
(
pos
<=
offset
)
continue
;
continue
;
get_timewait_sock
(
tw
,
tmpbuf
,
num
);
get_timewait_sock
(
tw
,
tmpbuf
,
num
);
len
+=
sprintf
(
buffer
+
len
,
"%-*s
\n
"
,
TMPSZ
-
1
,
tmpbuf
);
len
+=
sprintf
(
buffer
+
len
,
"%-*s
\n
"
,
TMPSZ
-
1
,
tmpbuf
);
if
(
pos
>=
offset
+
length
)
{
if
(
pos
>=
offset
+
length
)
{
read_unlock
(
&
head
->
lock
);
read_unlock
(
&
head
->
lock
);
goto
out
;
goto
out
;
...
@@ -2333,7 +2331,7 @@ void __init tcp_v4_init(struct net_proto_family *ops)
...
@@ -2333,7 +2331,7 @@ void __init tcp_v4_init(struct net_proto_family *ops)
int
err
=
sock_create
(
PF_INET
,
SOCK_RAW
,
IPPROTO_TCP
,
&
tcp_socket
);
int
err
=
sock_create
(
PF_INET
,
SOCK_RAW
,
IPPROTO_TCP
,
&
tcp_socket
);
if
(
err
<
0
)
if
(
err
<
0
)
panic
(
"Failed to create the TCP control socket.
\n
"
);
panic
(
"Failed to create the TCP control socket.
\n
"
);
tcp_socket
->
sk
->
allocation
=
GFP_ATOMIC
;
tcp_socket
->
sk
->
allocation
=
GFP_ATOMIC
;
inet_sk
(
tcp_socket
->
sk
)
->
ttl
=
MAXTTL
;
inet_sk
(
tcp_socket
->
sk
)
->
ttl
=
MAXTTL
;
/* Unhash it so that IP input processing does not even
/* Unhash it so that IP input processing does not even
...
...
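For orientation (not part of the patch itself): tcp_get_info() above is the routine behind /proc/net/tcp, emitting one fixed-width TMPSZ record per listening socket, open request, established socket and time-wait bucket under the header line shown in the hunk. A minimal user-space sketch of reading that table follows; it assumes only the column layout given by the header string above, and the parsing is illustrative rather than exhaustive.

/* Illustrative /proc/net/tcp reader; relies only on the header layout
 * shown above (" sl local_address rem_address st ...") with hex fields. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/net/tcp", "r");
    char line[512];

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (!fgets(line, sizeof(line), f)) {    /* skip the header line */
        fclose(f);
        return 0;
    }
    while (fgets(line, sizeof(line), f)) {
        unsigned int sl, st;
        char local[64], remote[64];

        /* each record: index, local addr:port, remote addr:port, state byte */
        if (sscanf(line, "%u: %63s %63s %x", &sl, local, remote, &st) == 4)
            printf("%3u: %s -> %s  state %02X\n", sl, local, remote, st);
    }
    fclose(f);
    return 0;
}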
net/ipv6/raw.c
View file @
868f24fc
...
@@ -11,6 +11,7 @@
 *
 *  Fixes:
 *  Hideaki YOSHIFUJI   :   sin6_scope_id support
 *  YOSHIFUJI,H.@USAGI  :   raw checksum (RFC2292(bis) compliance)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
...
@@ -487,11 +488,18 @@ static int rawv6_frag_cksum(const void *data, struct in6_addr *addr,
        hdr->cksum = csum_ipv6_magic(addr, daddr, hdr->len,
                                     hdr->proto, hdr->cksum);

        if (opt->offset < len) {
        if (opt->offset + 1 < len) {
            __u16 *csum;

            csum = (__u16 *)(buff + opt->offset);
            if (*csum) {    /* in case cksum was not initialized */
                __u32 sum = hdr->cksum;
                sum += *csum;
                *csum = hdr->cksum = (sum + (sum >> 16));
            } else {
                *csum = hdr->cksum;
            }
        } else {
            if (net_ratelimit())
                printk(KERN_DEBUG "icmp: cksum offset too big\n");
...
@@ -720,6 +728,10 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
    switch (optname) {
    case IPV6_CHECKSUM:
        /* You may get strange result with a positive odd offset;
           RFC2292bis agrees with me. */
        if (val > 0 && (val & 1))
            return (-EINVAL);
        if (val < 0) {
            opt->checksum = 0;
        } else {
...
@@ -817,6 +829,11 @@ static void rawv6_close(struct sock *sk, long timeout)
static int rawv6_init_sk(struct sock *sk)
{
    if (inet_sk(sk)->num == IPPROTO_ICMPV6) {
        struct raw6_opt *opt = raw6_sk(sk);
        opt->checksum = 1;
        opt->offset = 2;
    }
    return (0);
}
...
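The raw.c hunks above add RFC 2292-style checksum control: positive odd offsets are now rejected with EINVAL, and ICMPv6 sockets get kernel checksumming enabled by default at offset 2 (the ICMPv6 checksum field). As a hedged user-space illustration of configuring the option explicitly, a sketch follows; the IPPROTO_IPV6 level and the fallback define are standard RFC 2292 usage and assumptions here, not something taken from this patch.

/* Sketch only: explicitly enable kernel checksumming on a raw IPv6 socket.
 * For IPPROTO_ICMPV6 sockets the patch above already defaults this to offset 2. */
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPV6_CHECKSUM
#define IPV6_CHECKSUM 7         /* assumed value if the headers lack it */
#endif

int main(void)
{
    int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
    int offset = 2;             /* checksum field offset inside the ICMPv6 header */

    if (fd < 0) {               /* raw sockets need CAP_NET_RAW / root */
        perror("socket");
        return 1;
    }
    /* A positive odd offset would now be rejected by rawv6_setsockopt(). */
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM, &offset, sizeof(offset)) < 0)
        perror("setsockopt(IPV6_CHECKSUM)");
    return 0;
}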
net/sched/sch_api.c
View file @
868f24fc
...
@@ -1117,7 +1117,7 @@ static void psched_tick(unsigned long dummy)
    psched_timer.expires = jiffies + 1 * HZ;
#else
    unsigned long now = jiffies;
    psched_time_base = ((u64)now) << PSCHED_JSCALE;
    psched_time_base += ((u64)(now - psched_time_mark)) << PSCHED_JSCALE;
    psched_time_mark = now;
    psched_timer.expires = now + 60 * 60 * HZ;
#endif
...
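The sch_api.c hunk above stops rebuilding psched_time_base absolutely from the current jiffies value and instead advances it by the scaled delta since the last mark, which keeps the 64-bit scheduler clock monotonic across a jiffies wrap. A small standalone sketch of the two update styles follows; the PSCHED_JSCALE value and the wrap scenario are assumptions chosen for illustration, not the kernel's configuration.

/* Compare the absolute update with the incremental one around a 32-bit
 * jiffies wrap.  PSCHED_JSCALE here is an assumed example value. */
#include <stdio.h>
#include <stdint.h>

#define PSCHED_JSCALE 10

int main(void)
{
    uint32_t jiffies = 0xFFFFFFF0u;                 /* shortly before wrap */
    uint32_t psched_time_mark = jiffies;
    uint64_t psched_time_base = ((uint64_t)jiffies) << PSCHED_JSCALE;

    jiffies += 0x20;                                /* timer fires after the wrap */

    /* absolute rebuild: derived from the now-small jiffies value, jumps backwards */
    uint64_t absolute = ((uint64_t)jiffies) << PSCHED_JSCALE;

    /* incremental update: advance by the unsigned delta, stays monotonic */
    psched_time_base += ((uint64_t)(jiffies - psched_time_mark)) << PSCHED_JSCALE;
    psched_time_mark = jiffies;

    printf("absolute rebuild : %llu\n", (unsigned long long)absolute);
    printf("incremental base : %llu\n", (unsigned long long)psched_time_base);
    return 0;
}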