Commit e17427d4, authored Jun 26, 2003 by David S. Miller
[TCP]: If we have a lot of time-wait sockets to kill, do it via workqueue.
parent 2ecefe4d
Showing 1 changed file with 76 additions and 12 deletions.
net/ipv4/tcp_minisocks.c (+76, -12)
@@ -23,6 +23,7 @@
 #include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/sysctl.h>
+#include <linux/workqueue.h>
 #include <net/tcp.h>
 #include <net/inet_common.h>
 #include <net/xfrm.h>
@@ -407,15 +408,22 @@ static void tcp_twkill(unsigned long);
 #define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
+#define TCP_TWKILL_QUOTA 100
 
 static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
 static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
 static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
+static void twkill_work(void *);
+static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
+static u32 twkill_thread_slots;
 
-static void tcp_twkill(unsigned long dummy)
+/* Returns non-zero if quota exceeded.  */
+static int tcp_do_twkill_work(int slot, unsigned int quota)
 {
         struct tcp_tw_bucket *tw;
         struct hlist_node *node, *safe;
-        int killed = 0;
+        unsigned int killed;
+        int ret;
 
         /* NOTE: compare this to previous version where lock
          * was released after detaching chain. It was racy,
@@ -423,30 +431,86 @@ static void tcp_twkill(unsigned long dummy)
          * in 2.3 (with netfilter), and with softnet it is common, because
          * soft irqs are not sequenced.
          */
-        spin_lock(&tw_death_lock);
-
-        if (tcp_tw_count == 0)
-                goto out;
+        killed = 0;
+        ret = 0;
 
         tw_for_each_inmate(tw, node, safe,
-                           &tcp_tw_death_row[tcp_tw_death_row_slot]) {
+                           &tcp_tw_death_row[slot]) {
                 __tw_del_dead_node(tw);
                 spin_unlock(&tw_death_lock);
                 tcp_timewait_kill(tw);
                 tcp_tw_put(tw);
                 killed++;
                 spin_lock(&tw_death_lock);
+                if (killed > quota) {
+                        ret = 1;
+                        break;
+                }
         }
-        tcp_tw_death_row_slot =
-                ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
 
-        if ((tcp_tw_count -= killed) != 0)
-                mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
+        tcp_tw_count -= killed;
         NET_ADD_STATS_BH(TimeWaited, killed);
+
+        return ret;
+}
+
+static void tcp_twkill(unsigned long dummy)
+{
+        int need_timer, ret;
+
+        spin_lock(&tw_death_lock);
+
+        if (tcp_tw_count == 0)
+                goto out;
+
+        need_timer = 0;
+        ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
+        if (ret) {
+                twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
+                mb();
+                schedule_work(&tcp_twkill_work);
+                need_timer = 1;
+        } else {
+                /* We purged the entire slot, anything left?  */
+                if (tcp_tw_count)
+                        need_timer = 1;
+        }
+        tcp_tw_death_row_slot =
+                ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
+        if (need_timer)
+                mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
 out:
         spin_unlock(&tw_death_lock);
 }
 
+extern void twkill_slots_invalid(void);
+
+static void twkill_work(void *dummy)
+{
+        int i;
+
+        if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
+                twkill_slots_invalid();
+
+        while (twkill_thread_slots) {
+                spin_lock_bh(&tw_death_lock);
+                for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
+                        if (!(twkill_thread_slots & (1 << i)))
+                                continue;
+
+                        while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
+                                if (need_resched()) {
+                                        spin_unlock_bh(&tw_death_lock);
+                                        schedule();
+                                        spin_lock_bh(&tw_death_lock);
+                                }
+                        }
+
+                        twkill_thread_slots &= ~(1 << i);
+                }
+                spin_unlock_bh(&tw_death_lock);
+        }
+}
+
 /* These are always called from BH context.  See callers in
  * tcp_input.c to verify this.
  */
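The pattern the commit introduces is general: a timer handler running in softirq context does at most TCP_TWKILL_QUOTA units of work per tick, and when it hits the quota it records the unfinished slot in the twkill_thread_slots bitmask and schedules a work item to finish the job in process context, where blocking and rescheduling are allowed. Below is a minimal userspace sketch of that shape, not kernel code: the names (SLOTS, QUOTA, reap_slot, tick, worker, pending_slots, kick) are hypothetical stand-ins for TCP_TWKILL_SLOTS, TCP_TWKILL_QUOTA, tcp_do_twkill_work(), tcp_twkill(), twkill_work() and twkill_thread_slots, and a pthread condition variable plays the role schedule_work() plays in the patch.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define SLOTS 8
#define QUOTA 100

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static uint32_t pending_slots;          /* analogue of twkill_thread_slots */
static int items[SLOTS] = { 0, 250, 0, 30, 0, 0, 0, 0 };

/* Reap at most `quota` items from one slot; non-zero means the quota was
 * exceeded and work remains (the contract of tcp_do_twkill_work()).
 * Called with `lock` held. */
static int reap_slot(int slot, int quota)
{
        int killed = 0;

        while (items[slot] > 0) {
                items[slot]--;
                if (++killed > quota)
                        return 1;       /* over quota: defer the rest */
        }
        return 0;
}

/* Periodic handler, the analogue of the tcp_tw_timer callback: do one
 * bounded batch; on overflow, flag the slot and wake the worker. */
static void tick(int slot)
{
        pthread_mutex_lock(&lock);
        if (reap_slot(slot, QUOTA)) {
                pending_slots |= 1u << slot;
                pthread_cond_signal(&kick);     /* ~schedule_work() */
        }
        pthread_mutex_unlock(&lock);
}

/* Worker thread, the analogue of twkill_work(): drain every flagged slot
 * to empty, quota-sized pass by quota-sized pass, in a context that is
 * allowed to block. */
static void *worker(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&lock);
        for (;;) {
                while (pending_slots == 0)
                        pthread_cond_wait(&kick, &lock);
                for (int i = 0; i < SLOTS; i++) {
                        if (!(pending_slots & (1u << i)))
                                continue;
                        while (reap_slot(i, QUOTA) != 0)
                                ;       /* loop until the slot is empty */
                        pending_slots &= ~(1u << i);
                }
        }
        return NULL;    /* not reached */
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        for (int slot = 0; slot < SLOTS; slot++)
                tick(slot);             /* slot 1 exceeds QUOTA, is deferred */
        sleep(1);                       /* let the worker finish draining */

        pthread_mutex_lock(&lock);
        printf("left in slot 1: %d\n", items[1]);
        pthread_mutex_unlock(&lock);
        return 0;
}

The quota is what bounds how long the timer callback can hold the CPU in softirq context, where sleeping is forbidden; the worker, like twkill_work() in the patch, may take as many quota-sized passes as it needs, and because it runs in process context it can check need_resched() and call schedule() between passes, which the timer handler could not.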