nexedi / linux · Commit 9850a96f

Authored Mar 24, 2003 by Jon Grimm

Merge touki.austin.ibm.com:/home/jgrimm/bk/linux-2.5.66
into touki.austin.ibm.com:/home/jgrimm/bk/lksctp-2.5.work

Parents: 42382f86, a33b4399

Showing 9 changed files with 1202 additions and 1151 deletions.
  include/net/sctp/constants.h    +3    -5
  include/net/sctp/sctp.h         +31   -22
  include/net/sctp/structs.h      +6    -3
  net/sctp/associola.c            +12   -25
  net/sctp/input.c                +110  -64
  net/sctp/ipv6.c                 +63   -18
  net/sctp/output.c               +40   -2
  net/sctp/outqueue.c             +6    -7
  net/sctp/sm_sideeffect.c        +931  -1005
include/net/sctp/constants.h

@@ -138,12 +138,10 @@ typedef enum {
  */
 typedef union {
         sctp_cid_t chunk;
         sctp_event_timeout_t timeout;
         sctp_event_other_t other;
         sctp_event_primitive_t primitive;
 } sctp_subtype_t;

 #define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \

@@ -421,9 +419,9 @@ typedef enum {
 /* Reasons to retransmit. */
 typedef enum {
-        SCTP_RETRANSMIT_T3_RTX,
-        SCTP_RETRANSMIT_FAST_RTX,
-        SCTP_RETRANSMIT_PMTU_DISCOVERY,
+        SCTP_RTXR_T3_RTX,
+        SCTP_RTXR_FAST_RTX,
+        SCTP_RTXR_PMTUD,
 } sctp_retransmit_reason_t;

 /* Reasons to lower cwnd. */
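The rename is mechanical but worth tracing: these reason codes are consumed by sctp_retransmit(), whose switch appears in the net/sctp/outqueue.c hunks below. A minimal sketch of code branching on the renamed values (the helper is hypothetical, not part of this commit):

        /* Hypothetical helper, for illustration only. */
        static const char *sctp_rtxr_name(sctp_retransmit_reason_t reason)
        {
                switch (reason) {
                case SCTP_RTXR_T3_RTX:   return "T3-rtx timer expiry";
                case SCTP_RTXR_FAST_RTX: return "fast retransmit";
                case SCTP_RTXR_PMTUD:    return "path MTU discovery";
                }
                return "unknown";
        }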
include/net/sctp/sctp.h

@@ -123,14 +123,14 @@
  */
 extern struct sctp_protocol sctp_proto;
 extern struct sock *sctp_get_ctl_sock(void);
 extern int sctp_copy_local_addr_list(struct sctp_protocol *,
                                      struct sctp_bind_addr *,
                                      sctp_scope_t, int priority, int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);

 /*
- * sctp_socket.c
+ * sctp/socket.c
  */
 extern int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 extern int sctp_inet_listen(struct socket *sock, int backlog);

@@ -139,7 +139,7 @@ extern unsigned int sctp_poll(struct file *file, struct socket *sock,
                poll_table *wait);

 /*
- * sctp_primitive.c
+ * sctp/primitive.c
  */
 extern int sctp_primitive_ASSOCIATE(sctp_association_t *, void *arg);
 extern int sctp_primitive_SHUTDOWN(sctp_association_t *, void *arg);

@@ -148,14 +148,14 @@ extern int sctp_primitive_SEND(sctp_association_t *, void *arg);
 extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);

 /*
- * sctp_crc32c.c
+ * sctp/crc32c.c
  */
 extern __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
 extern __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
 extern __u32 sctp_end_cksum(__u32 cksum);

 /*
- * sctp_input.c
+ * sctp/input.c
  */
 extern int sctp_rcv(struct sk_buff *skb);
 extern void sctp_v4_err(struct sk_buff *skb, u32 info);

@@ -170,9 +170,16 @@ extern void __sctp_unhash_endpoint(sctp_endpoint_t *);
 extern sctp_association_t *__sctp_lookup_association(const union sctp_addr *,
                                                      const union sctp_addr *,
                                                      struct sctp_transport **);
+extern struct sock *sctp_err_lookup(int family, struct sk_buff *,
+                                    struct sctphdr *, struct sctp_endpoint **,
+                                    struct sctp_association **,
+                                    struct sctp_transport **);
+extern void sctp_err_finish(struct sock *, struct sctp_endpoint *,
+                            struct sctp_association *);
+extern void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
+                                  struct sctp_transport *t, __u32 pmtu);

 /*
- * sctp_hashdriver.c
+ * sctp/hashdriver.c
  */
 extern void sctp_hash_digest(const char *secret, const int secret_len,
                              const char *text, const int text_len,

@@ -184,9 +191,7 @@ extern void sctp_hash_digest(const char *secret, const int secret_len,
 #ifdef TEST_FRAME
 #include <test_frame.h>
 #else

 /* spin lock wrappers. */

@@ -312,7 +317,6 @@ static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }
 #endif

 /* Size of Supported Address Parameter for 'x' address types. */
 #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))

@@ -320,19 +324,15 @@ static inline void sctp_sysctl_unregister(void) { return; }
 extern int sctp_v6_init(void);
 extern void sctp_v6_exit(void);
-static inline int sctp_ipv6_addr_type(const struct in6_addr *addr)
-{
-        return ipv6_addr_type((struct in6_addr *)addr);
-}
+extern void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                        int type, int code, int offset, __u32 info);

-#else /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#else /* #ifdef defined(CONFIG_IPV6) */

-#define sctp_ipv6_addr_type(a) 0
 static inline int sctp_v6_init(void) { return 0; }
 static inline void sctp_v6_exit(void) { return; }

-#endif /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* #if defined(CONFIG_IPV6) */

 /* Map an association to an assoc_id. */
 static inline sctp_assoc_t sctp_assoc2id(const sctp_association_t *asoc)

@@ -414,13 +414,22 @@ static inline __s32 sctp_jitter(__u32 rto)
        sctp_rand ^= (sctp_rand << 12);
        sctp_rand ^= (sctp_rand >> 20);

        /* Choose random number from 0 to rto, then move to -50% ~ +50%
         * of rto.
         */
        ret = sctp_rand % rto - (rto >> 1);
        return ret;
 }

+/* Break down data chunks at this point.  */
+static inline int sctp_frag_point(int pmtu)
+{
+        pmtu -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+        pmtu -= sizeof(struct sctp_sack_chunk);
+
+        return pmtu;
+}

 /* Walk through a list of TLV parameters.  Don't trust the
  * individual parameter lengths and instead depend on
  * the chunk length to indicate when to stop.  Make sure

@@ -537,7 +546,7 @@ struct sctp_sock {
        struct sock sk;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        struct ipv6_pinfo *pinet6;
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */
        struct inet_opt inet;
        struct sctp_opt sctp;
 };

@@ -550,7 +559,7 @@ struct sctp6_sock {
        struct sctp_opt sctp;
        struct ipv6_pinfo inet6;
 };
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */

 #define sctp_sk(__sk) (&((struct sctp_sock *)__sk)->sctp)
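The new sctp_frag_point() helper centralizes arithmetic that two call sites in net/sctp/associola.c previously duplicated (see that file's hunks below): from the path MTU it reserves the IP-plus-SCTP header overhead, one DATA chunk header, and one SACK chunk, so a SACK can always be bundled with a full-size DATA chunk. A standalone sketch of the same arithmetic, with assumed sizes (the real values come from the kernel headers and depend on struct packing):

        #include <stdio.h>

        int main(void)
        {
                int pmtu = 1500;            /* typical Ethernet path MTU */
                int ip_overhead = 20 + 12;  /* assumed IPv4 + SCTP common header */
                int data_hdr = 16;          /* assumed sizeof(struct sctp_data_chunk) */
                int sack_chunk = 16;        /* assumed minimal SACK chunk */
                int frag_point = pmtu - ip_overhead - data_hdr - sack_chunk;

                /* Prints 1436 under these assumptions. */
                printf("frag_point = %d bytes of payload per DATA chunk\n",
                       frag_point);
                return 0;
        }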
include/net/sctp/structs.h

@@ -590,13 +590,16 @@ struct sctp_packet {
        /* This packet should advertise ECN capability to the network
         * via the ECT bit.
         */
-        int ecn_capable;
+        char ecn_capable;

        /* This packet contains a COOKIE-ECHO chunk. */
-        int has_cookie_echo;
+        char has_cookie_echo;

+        /* This packet contains a SACK chunk. */
+        char has_sack;
+
        /* SCTP cannot fragment this packet. So let ip fragment it. */
-        int ipfragok;
+        char ipfragok;

        int malloced;
 };
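Narrowing the per-packet booleans from int to char shrinks struct sctp_packet; the flags only ever hold 0 or 1. Illustrative only, since exact sizes are ABI-dependent:

        struct flags_as_int  { int  a, b, c; };  /* 12 bytes on common 32-bit ABIs */
        struct flags_as_char { char a, b, c; };  /*  3 bytes, plus any tail padding */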
net/sctp/associola.c

@@ -421,8 +421,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
        SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
                          "%d\n", asoc, asoc->pmtu);

-        asoc->frag_point = asoc->pmtu;
-        asoc->frag_point -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+        asoc->frag_point = sctp_frag_point(asoc->pmtu);

        /* The asoc->peer.port might not be meaningful yet, but
         * initialize the packet structure anyway.

@@ -658,32 +657,21 @@ int sctp_cmp_addr_exact(const union sctp_addr *ss1,
 }

 /* Return an ecne chunk to get prepended to a packet.
- * Note: We are sly and return a shared, prealloced chunk.
+ * Note: We are sly and return a shared, prealloced chunk.  FIXME:
+ * No we don't, but we could/should.
  */
-sctp_chunk_t *sctp_get_ecne_prepend(sctp_association_t *asoc)
+sctp_chunk_t *sctp_get_ecne_prepend(struct sctp_association *asoc)
 {
-        sctp_chunk_t *chunk;
-        int need_ecne;
-        __u32 lowest_tsn;
+        struct sctp_chunk *chunk;

-        /* Can be called from task or bh.  Both need_ecne and
-         * last_ecne_tsn are written during bh.
+        /* Send ECNE if needed.
+         * Not being able to allocate a chunk here is not deadly.
         */
-        need_ecne = asoc->need_ecne;
-        lowest_tsn = asoc->last_ecne_tsn;
-
-        if (need_ecne) {
-                chunk = sctp_make_ecne(asoc, lowest_tsn);
-
-                /* ECNE is not mandatory to the flow.  Being unable to
-                 * alloc mem is not deadly.  We are just unable to help
-                 * out the network.  If we run out of memory, just return
-                 * NULL.
-                 */
-        } else {
-                chunk = NULL;
-        }
+        if (asoc->need_ecne)
+                chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
+        else
+                chunk = NULL;

        return chunk;
 }

@@ -986,8 +974,7 @@ void sctp_assoc_sync_pmtu(sctp_association_t *asoc)
        if (pmtu) {
                asoc->pmtu = pmtu;
-                asoc->frag_point = pmtu - (SCTP_IP_OVERHEAD +
-                                           sizeof(sctp_data_chunk_t));
+                asoc->frag_point = sctp_frag_point(pmtu);
        }

        SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
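Both PMTU paths now share one formula. Before this change, sctp_assoc_add_peer() and sctp_assoc_sync_pmtu() each open-coded the subtraction (and subtracted only the DATA chunk header, with no SACK room), so the helper also keeps the two sites from drifting apart. A sketch of the now-common pattern (the wrapper name is hypothetical):

        /* Hypothetical wrapper: every PMTU update funnels through one formula. */
        static void update_frag_point(struct sctp_association *asoc, __u32 pmtu)
        {
                asoc->pmtu = pmtu;
                asoc->frag_point = sctp_frag_point(pmtu);
        }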
net/sctp/input.c

@@ -207,21 +207,19 @@ int sctp_rcv(struct sk_buff *skb)
         */
        sctp_bh_lock_sock(sk);

-        if (sock_owned_by_user(sk)) {
+        if (sock_owned_by_user(sk))
                sk_add_backlog(sk, (struct sk_buff *) chunk);
-        } else {
+        else
                sctp_backlog_rcv(sk, (struct sk_buff *) chunk);
-        }

        /* Release the sock and any reference counts we took in the
         * lookup calls.
         */
        sctp_bh_unlock_sock(sk);
-        if (asoc) {
+        if (asoc)
                sctp_association_put(asoc);
-        } else {
+        else
                sctp_endpoint_put(ep);
-        }
        sock_put(sk);
        return ret;

@@ -268,10 +266,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 }

 /* Handle icmp frag needed error. */
-static inline void sctp_icmp_frag_needed(struct sock *sk,
-                                         sctp_association_t *asoc,
-                                         struct sctp_transport *transport,
-                                         __u32 pmtu)
+void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
+                           struct sctp_transport *t, __u32 pmtu)
 {
        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
                printk(KERN_WARNING "%s: Reported pmtu %d too low, "

@@ -280,54 +276,38 @@ static inline void sctp_icmp_frag_needed(struct sock *sk,
                pmtu = SCTP_DEFAULT_MINSEGMENT;
        }

-        if (!sock_owned_by_user(sk) && transport &&
-            (transport->pmtu != pmtu)) {
-                transport->pmtu = pmtu;
+        if (!sock_owned_by_user(sk) && t && (t->pmtu != pmtu)) {
+                t->pmtu = pmtu;
                sctp_assoc_sync_pmtu(asoc);
-                sctp_retransmit(&asoc->outqueue, transport,
-                                SCTP_RETRANSMIT_PMTU_DISCOVERY);
+                sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
        }
 }

-/*
- * This routine is called by the ICMP module when it gets some
- * sort of error condition.  If err < 0 then the socket should
- * be closed and the error returned to the user.  If err > 0
- * it's just the icmp type << 8 | icmp code.  After adjustment
- * header points to the first 8 bytes of the sctp header.  We need
- * to find the appropriate port.
- *
- * The locking strategy used here is very "optimistic". When
- * someone else accesses the socket the ICMP is just dropped
- * and for some paths there is no check at all.
- * A more general error queue to queue errors for later handling
- * is probably better.
- *
- */
-void sctp_v4_err(struct sk_buff *skb, __u32 info)
+/* Common lookup code for icmp/icmpv6 error handler. */
+struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+                             struct sctphdr *sctphdr,
+                             struct sctp_endpoint **epp,
+                             struct sctp_association **app,
+                             struct sctp_transport **tpp)
 {
-        struct iphdr *iph = (struct iphdr *)skb->data;
-        struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
-        int type = skb->h.icmph->type;
-        int code = skb->h.icmph->code;
-        union sctp_addr saddr, daddr;
-        struct inet_opt *inet;
-        struct sock *sk = NULL;
-        sctp_endpoint_t *ep = NULL;
-        sctp_association_t *asoc = NULL;
-        struct sctp_transport *transport;
-        int err;
+        union sctp_addr saddr;
+        union sctp_addr daddr;
+        struct sctp_af *af;
+        struct sock *sk = NULL;
+        struct sctp_endpoint *ep = NULL;
+        struct sctp_association *asoc = NULL;
+        struct sctp_transport *transport = NULL;

-        if (skb->len < ((iph->ihl << 2) + 8)) {
-                ICMP_INC_STATS_BH(IcmpInErrors);
-                return;
+        *app = NULL; *epp = NULL; *tpp = NULL;
+
+        af = sctp_get_af_specific(family);
+        if (unlikely(!af)) {
+                return NULL;
        }

-        saddr.v4.sin_family = AF_INET;
-        saddr.v4.sin_port = ntohs(sh->source);
-        memcpy(&saddr.v4.sin_addr.s_addr, &iph->saddr, sizeof(struct in_addr));
-        daddr.v4.sin_family = AF_INET;
-        daddr.v4.sin_port = ntohs(sh->dest);
-        memcpy(&daddr.v4.sin_addr.s_addr, &iph->daddr, sizeof(struct in_addr));
+        /* Initialize local addresses for lookups. */
+        af->from_skb(&saddr, skb, 1);
+        af->from_skb(&daddr, skb, 0);

        /* Look for an association that matches the incoming ICMP error
         * packet.

@@ -340,13 +320,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
         */
                ep = __sctp_rcv_lookup_endpoint(&daddr);
                if (!ep) {
-                        ICMP_INC_STATS_BH(IcmpInErrors);
-                        return;
+                        return NULL;
                }
        }

        if (asoc) {
-                if (ntohl(sh->vtag) != asoc->c.peer_vtag) {
+                if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
                        ICMP_INC_STATS_BH(IcmpInErrors);
                        goto out;
                }

@@ -355,12 +334,90 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
                sk = ep->base.sk;

        sctp_bh_lock_sock(sk);

        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LockDroppedIcmps);

+        *epp = ep;
+        *app = asoc;
+        *tpp = transport;
+        return sk;
+
+out:
+        sock_put(sk);
+        if (asoc)
+                sctp_association_put(asoc);
+        if (ep)
+                sctp_endpoint_put(ep);
+        return NULL;
+}
+
+/* Common cleanup code for icmp/icmpv6 error handler. */
+void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep,
+                     struct sctp_association *asoc)
+{
+        sctp_bh_unlock_sock(sk);
+        sock_put(sk);
+        if (asoc)
+                sctp_association_put(asoc);
+        if (ep)
+                sctp_endpoint_put(ep);
+}
+
+/*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition.  If err < 0 then the socket should
+ * be closed and the error returned to the user.  If err > 0
+ * it's just the icmp type << 8 | icmp code.  After adjustment
+ * header points to the first 8 bytes of the sctp header.  We need
+ * to find the appropriate port.
+ *
+ * The locking strategy used here is very "optimistic". When
+ * someone else accesses the socket the ICMP is just dropped
+ * and for some paths there is no check at all.
+ * A more general error queue to queue errors for later handling
+ * is probably better.
+ *
+ */
+void sctp_v4_err(struct sk_buff *skb, __u32 info)
+{
+        struct iphdr *iph = (struct iphdr *)skb->data;
+        struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
+        int type = skb->h.icmph->type;
+        int code = skb->h.icmph->code;
+        struct sock *sk;
+        sctp_endpoint_t *ep;
+        sctp_association_t *asoc;
+        struct sctp_transport *transport;
+        struct inet_opt *inet;
+        char *saveip, *savesctp;
+        int err;
+
+        if (skb->len < ((iph->ihl << 2) + 8)) {
+                ICMP_INC_STATS_BH(IcmpInErrors);
+                return;
+        }
+
+        /* Fix up skb to look at the embedded net header. */
+        saveip = skb->nh.raw;
+        savesctp = skb->h.raw;
+        skb->nh.iph = iph;
+        skb->h.raw = (char *)sh;
+        sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
+        /* Put back, the original pointers. */
+        skb->nh.raw = saveip;
+        skb->h.raw = savesctp;
+        if (!sk) {
+                ICMP_INC_STATS_BH(IcmpInErrors);
+                return;
+        }
+        /* Warning:  The sock lock is held.  Remember to call
+         * sctp_err_finish!
+         */
+
        switch (type) {
        case ICMP_PARAMETERPROB:
                err = EPROTO;

@@ -399,13 +456,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        }

 out_unlock:
-        sctp_bh_unlock_sock(sk);
-out:
-        sock_put(sk);
-        if (asoc)
-                sctp_association_put(asoc);
-        if (ep)
-                sctp_endpoint_put(ep);
+        sctp_err_finish(sk, ep, asoc);
 }

 /*

@@ -782,8 +833,3 @@ sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
        return asoc;
 }
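The lookup/cleanup split is what lets the ICMPv4 handler here and the new ICMPv6 handler in net/sctp/ipv6.c share one code path. The contract is easy to misuse: on success, sctp_err_lookup() returns with the socket lock held and endpoint/association references taken, and every such return must be paired with sctp_err_finish(). A sketch of the expected calling pattern, mirroring the new sctp_v4_err() (the function name is hypothetical):

        void example_icmp_err_handler(struct sk_buff *skb, struct sctphdr *sh)
        {
                struct sctp_endpoint *ep;
                struct sctp_association *asoc;
                struct sctp_transport *transport;
                struct sock *sk;

                sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
                if (!sk)
                        return;  /* nothing matched: no lock, no references held */

                /* ... act on the error with the sock lock held ... */

                sctp_err_finish(sk, ep, asoc);  /* unlock and drop references */
        }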
net/sctp/ipv6.c

 /* SCTP kernel reference Implementation
  * Copyright (c) 2001 Nokia, Inc.
  * Copyright (c) 2001 La Monte H.P. Yarroll
- * Copyright (c) 2002 International Business Machines, Corp.
+ * Copyright (c) 2002-2003 International Business Machines, Corp.
  *
  * This file is part of the SCTP kernel reference Implementation
  *

@@ -88,17 +88,62 @@ extern struct notifier_block sctp_inetaddr_notifier;
        ntohs((addr)->s6_addr16[6]), \
        ntohs((addr)->s6_addr16[7])

-/* FIXME: Comments. */
-static inline void sctp_v6_err(struct sk_buff *skb,
-                               struct inet6_skb_parm *opt,
-                               int type, int code, int offset, __u32 info)
-{
-        /* BUG.  WRITE ME. */
-}
+/* ICMP error handler. */
+void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                 int type, int code, int offset, __u32 info)
+{
+        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+        struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+        struct sock *sk;
+        sctp_endpoint_t *ep;
+        sctp_association_t *asoc;
+        struct sctp_transport *transport;
+        struct ipv6_pinfo *np;
+        char *saveip, *savesctp;
+        int err;
+
+        /* Fix up skb to look at the embedded net header. */
+        saveip = skb->nh.raw;
+        savesctp = skb->h.raw;
+        skb->nh.ipv6h = iph;
+        skb->h.raw = (char *)sh;
+        sk = sctp_err_lookup(AF_INET6, skb, sh, &ep, &asoc, &transport);
+        /* Put back, the original pointers. */
+        skb->nh.raw = saveip;
+        skb->h.raw = savesctp;
+        if (!sk) {
+                ICMP6_INC_STATS_BH(Icmp6InErrors);
+                return;
+        }
+
+        /* Warning:  The sock lock is held.  Remember to call
+         * sctp_err_finish!
+         */
+
+        switch (type) {
+        case ICMPV6_PKT_TOOBIG:
+                sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
+                goto out_unlock;
+        default:
+                break;
+        }
+
+        np = inet6_sk(sk);
+        icmpv6_err_convert(type, code, &err);
+        if (!sock_owned_by_user(sk) && np->recverr) {
+                sk->err = err;
+                sk->error_report(sk);
+        } else {
+                /* Only an error on timeout */
+                sk->err_soft = err;
+        }
+
+out_unlock:
+        sctp_err_finish(sk, ep, asoc);
+}

 /* Based on tcp_v6_xmit() in tcp_ipv6.c. */
-static inline int sctp_v6_xmit(struct sk_buff *skb,
-                               struct sctp_transport *transport,
-                               int ipfragok)
+static int sctp_v6_xmit(struct sk_buff *skb,
+                        struct sctp_transport *transport,
+                        int ipfragok)
 {
        struct sock *sk = skb->sk;
        struct ipv6_pinfo *np = inet6_sk(sk);

@@ -110,9 +155,9 @@ static int sctp_v6_xmit(struct sk_buff *skb,
        /* Fill in the dest address from the route entry passed with the skb
         * and the source address from the transport.
         */
        fl.fl6_dst = &rt6->rt6i_dst.addr;
        fl.fl6_src = &transport->saddr.v6.sin6_addr;

        fl.fl6_flowlabel = np->flow_label;
        IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);

@@ -174,7 +219,7 @@ struct dst_entry *sctp_v6_get_dst(sctp_association_t *asoc,
 /* Returns the number of consecutive initial bits that match in the 2 ipv6
  * addresses.
  */
 static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
                                          union sctp_addr *s2)
 {

@@ -186,7 +231,7 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
                __u32 a1xora2;

                a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i];

                if ((j = fls(ntohl(a1xora2))))
                        return (i * 32 + 32 - j);
        }

@@ -196,7 +241,7 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
 /* Fills in the source address(saddr) based on the destination address(daddr)
  * and asoc's bind address list.
  */
 void sctp_v6_get_saddr(sctp_association_t *asoc, struct dst_entry *dst,
                        union sctp_addr *daddr, union sctp_addr *saddr)
 {

@@ -432,7 +477,7 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
        return retval;
 }

 /* Create and initialize a new sk for the socket to be returned by accept(). */
 struct sock *sctp_v6_create_accept_sk(struct sock *sk,
                                       struct sctp_association *asoc)
 {

@@ -469,11 +514,11 @@ struct sock *sctp_v6_create_accept_sk(struct sock *sk,
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
        ipv6_addr_copy(&newnp->daddr, &asoc->peer.primary_addr.v6.sin6_addr);

        newinet->sport = inet->sport;
        newinet->dport = asoc->peer.port;

 #ifdef INET_REFCNT_DEBUG
        atomic_inc(&inet6_sock_nr);
        atomic_inc(&inet_sock_nr);

@@ -623,11 +668,11 @@ static int sctp_inet6_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
 /* Fill in Supported Address Type information for INIT and INIT-ACK
  * chunks.  Note: In the future, we may want to look at sock options
  * to determine whether a PF_INET6 socket really wants to have IPV4
  * addresses.
  * Returns number of addresses supported.
  */
 static int sctp_inet6_supported_addrs(const struct sctp_opt *opt,
                                       __u16 *types)
 {
        types[0] = SCTP_PARAM_IPV4_ADDRESS;
        types[1] = SCTP_PARAM_IPV6_ADDRESS;
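sctp_v6_err() can reuse the shared lookup because it temporarily points the skb's header fields (2.5-era skb->nh/skb->h) at the copies embedded in the ICMPv6 payload, then restores them. A condensed sketch of that save/restore idiom, factored out purely for illustration (the wrapper is hypothetical):

        static struct sock *v6_err_lookup(struct sk_buff *skb, struct ipv6hdr *iph,
                                          struct sctphdr *sh,
                                          struct sctp_endpoint **ep,
                                          struct sctp_association **asoc,
                                          struct sctp_transport **t)
        {
                char *saveip = skb->nh.raw;     /* remember the outer headers */
                char *savesctp = skb->h.raw;
                struct sock *sk;

                skb->nh.ipv6h = iph;            /* point at the embedded headers */
                skb->h.raw = (char *)sh;
                sk = sctp_err_lookup(AF_INET6, skb, sh, ep, asoc, t);
                skb->nh.raw = saveip;           /* put the originals back */
                skb->h.raw = savesctp;
                return sk;
        }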
net/sctp/output.c

@@ -79,6 +79,7 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
        packet->ecn_capable = ecn_capable;
        packet->get_prepend_chunk = prepend_handler;
        packet->has_cookie_echo = 0;
+        packet->has_sack = 0;
        packet->ipfragok = 0;

        /* We might need to call the prepend_handler right away.  */

@@ -100,6 +101,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
        packet->ecn_capable = 0;
        packet->get_prepend_chunk = NULL;
        packet->has_cookie_echo = 0;
+        packet->has_sack = 0;
        packet->ipfragok = 0;
        packet->malloced = 0;
        sctp_packet_reset(packet);

@@ -155,6 +157,37 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
        return retval;
 }

+/* Try to bundle a SACK with the packet. */
+static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
+                                           struct sctp_chunk *chunk)
+{
+        sctp_xmit_t retval = SCTP_XMIT_OK;
+
+        /* If sending DATA and haven't already bundled a SACK, try to
+         * bundle one in to the packet.
+         */
+        if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
+            !pkt->has_cookie_echo) {
+                struct sctp_association *asoc;
+                asoc = pkt->transport->asoc;
+
+                if (asoc->a_rwnd > asoc->rwnd) {
+                        struct sctp_chunk *sack;
+                        asoc->a_rwnd = asoc->rwnd;
+                        sack = sctp_make_sack(asoc);
+                        if (sack) {
+                                struct timer_list *timer;
+                                retval = sctp_packet_append_chunk(pkt, sack);
+                                asoc->peer.sack_needed = 0;
+                                timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
+                                if (timer_pending(timer) && del_timer(timer))
+                                        sctp_association_put(asoc);
+                        }
+                }
+        }
+        return retval;
+}
+
 /* Append a chunk to the offered packet reporting back any inability to do
  * so.
  */

@@ -167,6 +200,10 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
        size_t pmtu;
        int too_big;

+        retval = sctp_packet_bundle_sack(packet, chunk);
+        if (retval != SCTP_XMIT_OK)
+                goto finish;
+
        pmtu = ((packet->transport->asoc) ?
                (packet->transport->asoc->pmtu) :
                (packet->transport->pmtu));

@@ -216,9 +253,10 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
                retval = sctp_packet_append_data(packet, chunk);
                if (SCTP_XMIT_OK != retval)
                        goto finish;
-        } else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) {
+        } else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type)
                packet->has_cookie_echo = 1;
-        }
+        else if (SCTP_CID_SACK == chunk->chunk_hdr->type)
+                packet->has_sack = 1;

        /* It is OK to send this chunk.  */
        __skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
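The bundling is deliberately conservative: it only fires when the packet carries DATA, no SACK or COOKIE-ECHO is already aboard, and the peer's view of our receive window (a_rwnd) has fallen behind the real one (rwnd), i.e. an acknowledgement is actually overdue. The predicate, restated as a sketch (helper name hypothetical):

        static int want_bundled_sack(const struct sctp_packet *pkt,
                                     struct sctp_chunk *chunk)
        {
                const struct sctp_association *asoc = pkt->transport->asoc;

                return sctp_chunk_is_data(chunk) &&
                       !pkt->has_sack && !pkt->has_cookie_echo &&
                       asoc->a_rwnd > asoc->rwnd;
        }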
net/sctp/outqueue.c

@@ -357,7 +357,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
        __u8 fast_retransmit = 0;

        switch (reason) {
-        case SCTP_RETRANSMIT_T3_RTX:
+        case SCTP_RTXR_T3_RTX:
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
                /* Update the retran path if the T3-rtx timer has expired for
                 * the current retran path.

@@ -365,10 +365,11 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                if (transport == transport->asoc->peer.retran_path)
                        sctp_assoc_update_retran_path(transport->asoc);
                break;
-        case SCTP_RETRANSMIT_FAST_RTX:
+        case SCTP_RTXR_FAST_RTX:
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
                fast_retransmit = 1;
                break;
+        case SCTP_RTXR_PMTUD:
        default:
                break;
        }

@@ -876,7 +877,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
        start_timer = 0;
        queue = &q->out;

-        while (NULL != (chunk = sctp_outq_dequeue_data(q))) {
+        while ((chunk = sctp_outq_dequeue_data(q))) {
                /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
                 * stream identifier.
                 */

@@ -891,9 +892,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                        if (ev)
                                sctp_ulpq_tail_event(&asoc->ulpq, ev);

-                        /* Free the chunk.  This chunk is not on any
-                         * list yet, just free it.
-                         */
+                        /* Free the chunk. */
                        sctp_free_chunk(chunk);
                        continue;
                }

@@ -1572,7 +1571,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
        if (transport) {
                if (do_fast_retransmit)
-                        sctp_retransmit(q, transport, SCTP_RETRANSMIT_FAST_RTX);
+                        sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

                SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
                                  "ssthresh: %d, flight_size: %d, pba: %d\n",
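Making SCTP_RTXR_PMTUD an explicit (empty) case documents the intent: a retransmission forced by path MTU discovery re-queues the data but, unlike the T3 and fast-retransmit cases, does not lower the congestion window, since the loss signals an MTU mismatch rather than congestion. That reading is an inference from the switch above, restated as a sketch:

        switch (reason) {
        case SCTP_RTXR_T3_RTX:
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
                break;
        case SCTP_RTXR_FAST_RTX:
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
                break;
        case SCTP_RTXR_PMTUD:   /* no cwnd change: not a congestion signal */
        default:
                break;
        }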
net/sctp/sm_sideeffect.c
View file @
9850a96f
...
@@ -55,1202 +55,1128 @@
...
@@ -55,1202 +55,1128 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/sm.h>
/* Do forward declarations of static functions. */
/********************************************************************
static
void
sctp_do_ecn_ce_work
(
sctp_association_t
*
,
__u32
lowest_tsn
);
* Helper functions
static
sctp_chunk_t
*
sctp_do_ecn_ecne_work
(
sctp_association_t
*
asoc
,
********************************************************************/
__u32
lowest_tsn
,
sctp_chunk_t
*
);
static
void
sctp_do_ecn_cwr_work
(
sctp_association_t
*
,
__u32
lowest_tsn
);
static
void
sctp_do_8_2_transport_strike
(
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_init_failed
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_assoc_failed
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_event_t
,
sctp_subtype_t
,
sctp_chunk_t
*
chunk
);
static
int
sctp_cmd_process_init
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_chunk_t
*
chunk
,
sctp_init_chunk_t
*
peer_init
,
int
priority
);
static
void
sctp_cmd_hb_timers_start
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_hb_timers_stop
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_hb_timer_update
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_transport_reset
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_transport_on
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
,
sctp_chunk_t
*
);
static
int
sctp_cmd_process_sack
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_sackhdr_t
*
);
static
void
sctp_cmd_setup_t2
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_chunk_t
*
);
static
void
sctp_cmd_new_state
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_state_t
);
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
*/
#define DEBUG_PRE \
SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
"ep %p, %s, %s, asoc %p[%s], %s\n", \
ep, sctp_evttype_tbl[event_type], \
(*debug_fn)(subtype), asoc, \
sctp_state_tbl[state], state_fn->name)
#define DEBUG_POST \
/* A helper function for delayed processing of INET ECN CE bit. */
SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
static
void
sctp_do_ecn_ce_work
(
sctp_association_t
*
asoc
,
__u32
lowest_tsn
)
"asoc %p, status: %s\n", \
{
asoc, sctp_status_tbl[status])
/* Save the TSN away for comparison when we receive CWR */
#define DEBUG_POST_SFX \
asoc
->
last_ecne_tsn
=
lowest_tsn
;
SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
asoc
->
need_ecne
=
1
;
error, asoc, \
}
sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
/*
/*
Helper function for delayed processing of SCTP ECNE chunk. */
* This is the master state machine processing function.
/* RFC 2960 Appendix A
*
*
* If you want to understand all of lksctp, this is a
* RFC 2481 details a specific bit for a sender to send in
* good place to start.
* the header of its next outbound TCP segment to indicate to
* its peer that it has reduced its congestion window. This
* is termed the CWR bit. For SCTP the same indication is made
* by including the CWR chunk. This chunk contains one data
* element, i.e. the TSN number that was sent in the ECNE chunk.
* This element represents the lowest TSN number in the datagram
* that was originally marked with the CE bit.
*/
*/
int
sctp_do_sm
(
sctp_event_t
event_type
,
sctp_subtype_t
subtype
,
static
sctp_chunk_t
*
sctp_do_ecn_ecne_work
(
sctp_association_t
*
asoc
,
sctp_state_t
state
,
__u32
lowest_tsn
,
sctp_endpoint_t
*
ep
,
sctp_chunk_t
*
chunk
)
sctp_association_t
*
asoc
,
void
*
event_arg
,
int
priority
)
{
{
sctp_cmd_seq_t
commands
;
sctp_chunk_t
*
repl
;
sctp_sm_table_entry_t
*
state_fn
;
sctp_disposition_t
status
;
int
error
=
0
;
typedef
const
char
*
(
printfn_t
)(
sctp_subtype_t
);
static
printfn_t
*
table
[]
=
{
/* Our previously transmitted packet ran into some congestion
NULL
,
sctp_cname
,
sctp_tname
,
sctp_oname
,
sctp_pname
,
* so we should take action by reducing cwnd and ssthresh
};
* and then ACK our peer that we we've done so by
printfn_t
*
debug_fn
__attribute__
((
unused
))
=
table
[
event_type
];
* sending a CWR.
*/
/* Look up the state function, run it, and then process the
/* First, try to determine if we want to actually lower
* side effects. These three steps are the heart of lksctp.
* our cwnd variables. Only lower them if the ECNE looks more
* recent than the last response.
*/
*/
state_fn
=
sctp_sm_lookup_event
(
event_type
,
state
,
subtype
);
if
(
TSN_lt
(
asoc
->
last_cwr_tsn
,
lowest_tsn
))
{
struct
sctp_transport
*
transport
;
sctp_init_cmd_seq
(
&
commands
);
/* Find which transport's congestion variables
* need to be adjusted.
*/
transport
=
sctp_assoc_lookup_tsn
(
asoc
,
lowest_tsn
);
DEBUG_PRE
;
/* Update the congestion variables. */
status
=
(
*
state_fn
->
fn
)(
ep
,
asoc
,
subtype
,
event_arg
,
&
commands
);
if
(
transport
)
DEBUG_POST
;
sctp_transport_lower_cwnd
(
transport
,
SCTP_LOWER_CWND_ECNE
);
asoc
->
last_cwr_tsn
=
lowest_tsn
;
}
error
=
sctp_side_effects
(
event_type
,
subtype
,
state
,
/* Always try to quiet the other end. In case of lost CWR,
ep
,
asoc
,
event_arg
,
* resend last_cwr_tsn.
status
,
&
commands
,
*/
priority
);
repl
=
sctp_make_cwr
(
asoc
,
asoc
->
last_cwr_tsn
,
chunk
);
DEBUG_POST_SFX
;
return
error
;
/* If we run out of memory, it will look like a lost CWR. We'll
* get back in sync eventually.
*/
return
repl
;
}
}
#undef DEBUG_PRE
/* Helper function to do delayed processing of ECN CWR chunk. */
#undef DEBUG_POST
static
void
sctp_do_ecn_cwr_work
(
sctp_association_t
*
asoc
,
__u32
lowest_tsn
)
/*****************************************************************
* This the master state function side effect processing function.
*****************************************************************/
int
sctp_side_effects
(
sctp_event_t
event_type
,
sctp_subtype_t
subtype
,
sctp_state_t
state
,
sctp_endpoint_t
*
ep
,
sctp_association_t
*
asoc
,
void
*
event_arg
,
sctp_disposition_t
status
,
sctp_cmd_seq_t
*
commands
,
int
priority
)
{
{
int
error
;
/* Turn off ECNE getting auto-prepended to every outgoing
* packet
/* FIXME - Most of the dispositions left today would be categorized
* as "exceptional" dispositions. For those dispositions, it
* may not be proper to run through any of the commands at all.
* For example, the command interpreter might be run only with
* disposition SCTP_DISPOSITION_CONSUME.
*/
*/
if
(
0
!=
(
error
=
sctp_cmd_interpreter
(
event_type
,
subtype
,
state
,
asoc
->
need_ecne
=
0
;
ep
,
asoc
,
}
event_arg
,
status
,
commands
,
priority
)))
goto
bail
;
switch
(
status
)
{
case
SCTP_DISPOSITION_DISCARD
:
SCTP_DEBUG_PRINTK
(
"Ignored sctp protocol event - state %d, "
"event_type %d, event_id %d
\n
"
,
state
,
event_type
,
subtype
.
chunk
);
break
;
case
SCTP_DISPOSITION_NOMEM
:
/* Generate SACK if necessary. We call this at the end of a packet. */
/* We ran out of memory, so we need to discard this
int
sctp_gen_sack
(
struct
sctp_association
*
asoc
,
int
force
,
* packet.
sctp_cmd_seq_t
*
commands
)
*/
{
/* BUG--we should now recover some memory, probably by
__u32
ctsn
,
max_tsn_seen
;
* reneging...
struct
sctp_chunk
*
sack
;
*/
int
error
=
0
;
error
=
-
ENOMEM
;
break
;
case
SCTP_DISPOSITION_DELETE_TCB
:
if
(
force
)
/* This should now be a command. */
asoc
->
peer
.
sack_needed
=
1
;
break
;
case
SCTP_DISPOSITION_CONSUME
:
ctsn
=
sctp_tsnmap_get_ctsn
(
&
asoc
->
peer
.
tsn_map
);
case
SCTP_DISPOSITION_ABORT
:
max_tsn_seen
=
sctp_tsnmap_get_max_tsn_seen
(
&
asoc
->
peer
.
tsn_map
);
/*
* We should no longer have much work to do here as the
* real work has been done as explicit commands above.
*/
break
;
case
SCTP_DISPOSITION_VIOLATION
:
/* From 12.2 Parameters necessary per association (i.e. the TCB):
printk
(
KERN_ERR
"sctp protocol violation state %d "
*
"chunkid %d
\n
"
,
state
,
subtype
.
chunk
);
* Ack State : This flag indicates if the next received packet
break
;
* : is to be responded to with a SACK. ...
* : When DATA chunks are out of order, SACK's
* : are not delayed (see Section 6).
*
* [This is actually not mentioned in Section 6, but we
* implement it here anyway. --piggy]
*/
if
(
max_tsn_seen
!=
ctsn
)
asoc
->
peer
.
sack_needed
=
1
;
case
SCTP_DISPOSITION_NOT_IMPL
:
/* From 6.2 Acknowledgement on Reception of DATA Chunks:
printk
(
KERN_WARNING
"sctp unimplemented feature in state %d, "
*
"event_type %d, event_id %d
\n
"
,
* Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
state
,
event_type
,
subtype
.
chunk
);
* an acknowledgement SHOULD be generated for at least every
break
;
* second packet (not every second DATA chunk) received, and
* SHOULD be generated within 200 ms of the arrival of any
* unacknowledged DATA chunk. ...
*/
if
(
!
asoc
->
peer
.
sack_needed
)
{
/* We will need a SACK for the next packet. */
asoc
->
peer
.
sack_needed
=
1
;
goto
out
;
}
else
{
if
(
asoc
->
a_rwnd
>
asoc
->
rwnd
)
asoc
->
a_rwnd
=
asoc
->
rwnd
;
sack
=
sctp_make_sack
(
asoc
);
if
(
!
sack
)
goto
nomem
;
case
SCTP_DISPOSITION_BUG
:
asoc
->
peer
.
sack_needed
=
0
;
printk
(
KERN_ERR
"sctp bug in state %d, "
"event_type %d, event_id %d
\n
"
,
state
,
event_type
,
subtype
.
chunk
);
BUG
();
break
;
default:
error
=
sctp_outq_tail
(
&
asoc
->
outqueue
,
sack
);
printk
(
KERN_ERR
"sctp impossible disposition %d "
"in state %d, event_type %d, event_id %d
\n
"
,
status
,
state
,
event_type
,
subtype
.
chunk
);
BUG
();
break
;
};
bail:
/* Stop the SACK timer. */
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_TIMER_STOP
,
SCTP_TO
(
SCTP_EVENT_TIMEOUT_SACK
));
}
out:
return
error
;
nomem:
error
=
-
ENOMEM
;
return
error
;
return
error
;
}
}
/********************************************************************
/* When the T3-RTX timer expires, it calls this function to create the
* 2nd Level Abstractions
* relevant state machine event.
********************************************************************/
*/
void
sctp_generate_t3_rtx_event
(
unsigned
long
peer
)
/* This is the side-effect interpreter. */
int
sctp_cmd_interpreter
(
sctp_event_t
event_type
,
sctp_subtype_t
subtype
,
sctp_state_t
state
,
sctp_endpoint_t
*
ep
,
sctp_association_t
*
asoc
,
void
*
event_arg
,
sctp_disposition_t
status
,
sctp_cmd_seq_t
*
commands
,
int
priority
)
{
{
int
error
=
0
;
int
error
;
int
force
;
struct
sctp_transport
*
transport
=
(
struct
sctp_transport
*
)
peer
;
sctp_cmd_t
*
cmd
;
sctp_association_t
*
asoc
=
transport
->
asoc
;
sctp_chunk_t
*
new_obj
;
sctp_chunk_t
*
chunk
=
NULL
;
struct
sctp_packet
*
packet
;
struct
list_head
*
pos
;
struct
timer_list
*
timer
;
unsigned
long
timeout
;
struct
sctp_transport
*
t
;
sctp_sackhdr_t
sackh
;
if
(
SCTP_EVENT_T_TIMEOUT
!=
event_type
)
/* Check whether a task is in the sock. */
chunk
=
(
sctp_chunk_t
*
)
event_arg
;
/* Note: This whole file is a huge candidate for rework.
sctp_bh_lock_sock
(
asoc
->
base
.
sk
);
* For example, each command could either have its own handler, so
if
(
sock_owned_by_user
(
asoc
->
base
.
sk
))
{
* the loop would look like:
SCTP_DEBUG_PRINTK
(
"%s:Sock is busy.
\n
"
,
__FUNCTION__
);
* while (cmds)
* cmd->handle(x, y, z)
* --jgrimm
*/
while
(
NULL
!=
(
cmd
=
sctp_next_cmd
(
commands
)))
{
switch
(
cmd
->
verb
)
{
case
SCTP_CMD_NOP
:
/* Do nothing. */
break
;
case
SCTP_CMD_NEW_ASOC
:
/* Try again later. */
/* Register a new association. */
if
(
!
mod_timer
(
&
transport
->
T3_rtx_timer
,
jiffies
+
(
HZ
/
20
)))
asoc
=
cmd
->
obj
.
ptr
;
sctp_transport_hold
(
transport
);
/* Register with the endpoint. */
goto
out_unlock
;
sctp_endpoint_add_asoc
(
ep
,
asoc
);
}
sctp_hash_established
(
asoc
);
break
;
case
SCTP_CMD_UPDATE_ASSOC
:
/* Is this transport really dead and just waiting around for
sctp_assoc_update
(
asoc
,
cmd
->
obj
.
ptr
);
* the timer to let go of the reference?
break
;
*/
if
(
transport
->
dead
)
goto
out_unlock
;
case
SCTP_CMD_PURGE_OUTQUEUE
:
/* Run through the state machine. */
sctp_outq_teardown
(
&
asoc
->
outqueue
);
error
=
sctp_do_sm
(
SCTP_EVENT_T_TIMEOUT
,
break
;
SCTP_ST_TIMEOUT
(
SCTP_EVENT_TIMEOUT_T3_RTX
),
asoc
->
state
,
asoc
->
ep
,
asoc
,
transport
,
GFP_ATOMIC
);
case
SCTP_CMD_DELETE_TCB
:
if
(
error
)
/* Delete the current association. */
asoc
->
base
.
sk
->
err
=
-
error
;
sctp_unhash_established
(
asoc
);
sctp_association_free
(
asoc
);
asoc
=
NULL
;
break
;
case
SCTP_CMD_NEW_STATE
:
out_unlock
:
/* Enter a new state. */
sctp_bh_unlock_sock
(
asoc
->
base
.
sk
);
sctp_cmd_new_state
(
commands
,
asoc
,
cmd
->
obj
.
state
);
sctp_transport_put
(
transport
);
break
;
}
case
SCTP_CMD_REPORT_TSN
:
/* This is a sa interface for producing timeout events. It works
/* Record the arrival of a TSN. */
* for timeouts which use the association as their parameter.
sctp_tsnmap_mark
(
&
asoc
->
peer
.
tsn_map
,
cmd
->
obj
.
u32
);
*/
break
;
static
void
sctp_generate_timeout_event
(
sctp_association_t
*
asoc
,
sctp_event_timeout_t
timeout_type
)
{
int
error
=
0
;
case
SCTP_CMD_GEN_SACK
:
sctp_bh_lock_sock
(
asoc
->
base
.
sk
);
/* Generate a Selective ACK.
if
(
sock_owned_by_user
(
asoc
->
base
.
sk
))
{
* The argument tells us whether to just count
SCTP_DEBUG_PRINTK
(
"%s:Sock is busy: timer %d
\n
"
,
* the packet and MAYBE generate a SACK, or
__FUNCTION__
,
* force a SACK out.
timeout_type
);
*/
force
=
cmd
->
obj
.
i32
;
error
=
sctp_gen_sack
(
asoc
,
force
,
commands
);
break
;
case
SCTP_CMD_PROCESS_SACK
:
/* Try again later. */
/* Process an inbound SACK. */
if
(
!
mod_timer
(
&
asoc
->
timers
[
timeout_type
],
jiffies
+
(
HZ
/
20
)))
error
=
sctp_cmd_process_sack
(
commands
,
asoc
,
sctp_association_hold
(
asoc
);
cmd
->
obj
.
ptr
)
;
goto
out_unlock
;
break
;
}
case
SCTP_CMD_GEN_INIT_ACK
:
/* Is this association really dead and just waiting around for
/* Generate an INIT ACK chunk. */
* the timer to let go of the reference?
new_obj
=
sctp_make_init_ack
(
asoc
,
chunk
,
GFP_ATOMIC
,
*/
0
);
if
(
asoc
->
base
.
dead
)
if
(
!
new_obj
)
goto
out_unlock
;
goto
nomem
;
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
/* Run through the state machine. */
SCTP_CHUNK
(
new_obj
));
error
=
sctp_do_sm
(
SCTP_EVENT_T_TIMEOUT
,
break
;
SCTP_ST_TIMEOUT
(
timeout_type
),
asoc
->
state
,
asoc
->
ep
,
asoc
,
(
void
*
)
timeout_type
,
GFP_ATOMIC
);
case
SCTP_CMD_PEER_INIT
:
if
(
error
)
/* Process a unified INIT from the peer.
asoc
->
base
.
sk
->
err
=
-
error
;
* Note: Only used during INIT-ACK processing. If
* there is an error just return to the outter
* layer which will bail.
*/
error
=
sctp_cmd_process_init
(
commands
,
asoc
,
chunk
,
cmd
->
obj
.
ptr
,
priority
);
break
;
case
SCTP_CMD_GEN_COOKIE_ECHO
:
out_unlock:
/* Generate a COOKIE ECHO chunk. */
sctp_bh_unlock_sock
(
asoc
->
base
.
sk
);
new_obj
=
sctp_make_cookie_echo
(
asoc
,
chunk
);
sctp_association_put
(
asoc
);
if
(
!
new_obj
)
{
}
if
(
cmd
->
obj
.
ptr
)
sctp_free_chunk
(
cmd
->
obj
.
ptr
);
goto
nomem
;
}
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
SCTP_CHUNK
(
new_obj
));
/* If there is an ERROR chunk to be sent along with
void
sctp_generate_t1_cookie_event
(
unsigned
long
data
)
* the COOKIE_ECHO, send it, too.
{
*/
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
if
(
cmd
->
obj
.
ptr
)
sctp_generate_timeout_event
(
asoc
,
SCTP_EVENT_TIMEOUT_T1_COOKIE
);
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
}
SCTP_CHUNK
(
cmd
->
obj
.
ptr
));
break
;
case
SCTP_CMD_GEN_SHUTDOWN
:
void
sctp_generate_t1_init_event
(
unsigned
long
data
)
/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
{
* Reset error counts.
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
*/
sctp_generate_timeout_event
(
asoc
,
SCTP_EVENT_TIMEOUT_T1_INIT
);
asoc
->
overall_error_count
=
0
;
}
/* Generate a SHUTDOWN chunk. */
void
sctp_generate_t2_shutdown_event
(
unsigned
long
data
)
new_obj
=
sctp_make_shutdown
(
asoc
);
{
if
(
!
new_obj
)
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
goto
nomem
;
sctp_generate_timeout_event
(
asoc
,
SCTP_EVENT_TIMEOUT_T2_SHUTDOWN
);
sctp_add_cmd_sf
(
commands
,
SCTP_CMD_REPLY
,
}
SCTP_CHUNK
(
new_obj
));
break
;
case
SCTP_CMD_CHUNK_ULP
:
void
sctp_generate_t5_shutdown_guard_event
(
unsigned
long
data
)
/* Send a chunk to the sockets layer. */
{
SCTP_DEBUG_PRINTK
(
"sm_sideff: %s %p, %s %p.
\n
"
,
sctp_association_t
*
asoc
=
(
sctp_association_t
*
)
data
;
"chunk_up:"
,
cmd
->
obj
.
ptr
,
/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	sctp_association_t *asoc = transport->asoc;

	/* Check whether a task is in the sock.  */
	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this transport really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (transport->dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);
}

/* This is a sa interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(sctp_association_t *asoc,
					sctp_event_timeout_t timeout_type)
{
	int error = 0;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
				  __FUNCTION__, timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *) timeout_type,
			   GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_association_put(asoc);
}

void sctp_generate_t1_cookie_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

void sctp_generate_t1_init_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

void sctp_generate_t2_shutdown_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
} /* sctp_generate_t5_shutdown_guard_event() */

void sctp_generate_autoclose_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
	sctp_association_t *asoc = transport->asoc;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (transport->dead)
		goto out_unlock;

	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine.  */
void sctp_generate_sack_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t5_shutdown_guard_event,
	sctp_generate_heartbeat_event,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};
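All of these generators share one reference-counting discipline: a pending timer owns a reference on the structure its callback will touch.  mod_timer() returns 0 when the timer was not already pending, i.e. no reference is held on the timer's behalf yet, so the re-arm path takes one; the callback drops it on exit.  A minimal sketch of that discipline, with my_obj, my_obj_hold() and my_obj_put() as hypothetical stand-ins for the sctp_*_hold()/sctp_*_put() calls above:

	/* Sketch only: re-arming a timer that pins its object. */
	static void my_rearm(struct my_obj *obj, unsigned long delay)
	{
		/* mod_timer() == 0: the timer was idle, so the newly
		 * pending timer needs its own reference on obj.
		 */
		if (!mod_timer(&obj->timer, jiffies + delay))
			my_obj_hold(obj);
	}

	static void my_timer_fn(unsigned long data)
	{
		struct my_obj *obj = (struct my_obj *) data;

		/* ... handle the timeout ... */

		my_obj_put(obj);	/* drop the timer's reference */
	}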
/********************************************************************
 * 3rd Level Abstractions
 ********************************************************************/

/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep a
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
					 struct sctp_transport *transport)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	asoc->overall_error_count++;

	if (transport->active &&
	    (transport->error_count++ >= transport->error_threshold)) {
		SCTP_DEBUG_PRINTK("transport_strike: transport "
				  "IP:%d.%d.%d.%d failed.\n",
				  NIPQUAD(transport->ipaddr.v4.sin_addr));
		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 */
	transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
}

/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
				 sctp_association_t *asoc)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						0, 0, 0, GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	/* FIXME:  We need to handle data possibly either
	 * sent via COOKIE-ECHO bundling or just waiting in
	 * the transmit queue, if the user has enabled
	 * SEND_FAILED notifications.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
				  sctp_association_t *asoc,
				  sctp_event_t event_type,
				  sctp_subtype_t subtype,
				  sctp_chunk_t *chunk)
{
	struct sctp_ulpevent *event;
	__u16 error = 0;

	switch (event_type) {
	case SCTP_EVENT_T_PRIMITIVE:
		if (SCTP_PRIMITIVE_ABORT == subtype.primitive)
			error = SCTP_ERROR_USER_ABORT;
		break;
	case SCTP_EVENT_T_CHUNK:
		if (chunk && (SCTP_CID_ABORT == chunk->chunk_hdr->type) &&
		    (ntohs(chunk->chunk_hdr->length) >=
		     (sizeof(struct sctp_chunkhdr) +
		      sizeof(struct sctp_errhdr)))) {
			error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
		}
		break;
	default:
		break;
	}

	/* Cancel any partial delivery in progress.  */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						error, 0, 0, GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* FIXME:  We need to handle data that could not be sent or was not
	 * acked, if the user has enabled SEND_FAILED notifications.
	 */
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie.  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
				 sctp_association_t *asoc,
				 sctp_chunk_t *chunk,
				 sctp_init_chunk_t *peer_init,
				 int priority)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk->chunk_hdr->type,
			       sctp_source(chunk), peer_init, priority))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);

		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
			sctp_transport_hold(t);
	}
}

static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
				    sctp_association_t *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Stop all heartbeat timers.  */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}
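Rule E2's backoff is the plain doubling-with-cap visible at the bottom of the strike routine.  As a purely illustrative trace (the actual initial RTO and rto_max are per-association tunables, these numbers are only an example): starting from an RTO of 3 seconds with rto_max at 60 seconds, repeated T3-rtx expiries on one address give 3, 6, 12, 24, 48, 60, 60, ... seconds:

	/* Illustration only: rule E2's doubling, capped at rto_max. */
	unsigned long rto = 3 * HZ;		/* example initial RTO */
	unsigned long rto_max = 60 * HZ;	/* example upper bound */
	int i;

	for (i = 0; i < 7; i++) {
		printk(KERN_DEBUG "strike %d: rto = %lu jiffies\n", i, rto);
		rto = min(rto * 2, rto_max);
	}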
/* Helper function to update the heartbeat timer.  */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer.  */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}

/* Helper function to handle the reception of a HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  sctp_association_t *asoc,
				  struct sctp_transport *t,
				  sctp_chunk_t *chunk)
{
	sctp_sender_hb_info_t *hbinfo;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 * The association's overall error count is also cleared.
	 */
	t->error_count = 0;
	t->asoc->overall_error_count = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if (!t->active)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 */
	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
}

/* Helper function to do a transport reset at the expiry of the heartbeat
 * timer.
 */
static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc,
				     struct sctp_transport *t)
{
	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);

	/* Mark one strike against a transport.  */
	sctp_do_8_2_transport_strike(asoc, t);
}

/* A helper function for delayed processing of INET ECN CE bit.  */
static void sctp_do_ecn_ce_work(sctp_association_t *asoc, __u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */
	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk.  */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static sctp_chunk_t *sctp_do_ecn_ecne_work(sctp_association_t *asoc,
					   __u32 lowest_tsn,
					   sctp_chunk_t *chunk)
{
	sctp_chunk_t *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables.  */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(sctp_association_t *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}
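The TSN_lt() test above is what keeps a duplicated or reordered ECNE from cutting cwnd twice for the same congestion event: only an ECNE naming a TSN newer than the last answered CWR is acted on, while a stale ECNE merely triggers a retransmitted CWR.  TSN comparison is circular 32-bit serial-number arithmetic; a self-contained sketch of such a predicate (an illustration, not the kernel's TSN_lt macro itself):

	/* Sketch: 'a older than b' in a 32-bit wrapping TSN space.
	 * The signed difference stays correct across wrap-around as
	 * long as the two values are within 2^31 of each other.
	 */
	static inline int tsn_lt_sketch(__u32 a, __u32 b)
	{
		return ((__s32) (a - b)) < 0;
	}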
/* Helper function to process the SCTP_CMD_PROCESS_SACK command.  */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 sctp_association_t *asoc,
				 sctp_sackhdr_t *sackh)
{
	int err;

	if (sctp_outq_sack(&asoc->outqueue, sackh)) {
		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	} else {
		/* Windows may have opened, so we need
		 * to check if we have DATA to transmit
		 */
		err = sctp_outq_flush(&asoc->outqueue, 0);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
			      sctp_chunk_t *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_shutdown_transport(asoc);
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
	chunk->transport = t;
}

/* Helper function to change the state of an association.  */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_opt *sp = sctp_sk(sk);

	asoc->state = state;
	asoc->state_timestamp = jiffies;

	if ((SCTP_STATE_ESTABLISHED == asoc->state) ||
	    (SCTP_STATE_CLOSED == asoc->state)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (SCTP_SOCKET_UDP != sp->type)
			sk->state_change(sk);
	}

	/* Change the sk->state of a TCP-style socket that has successfully
	 * completed a connect() call.
	 */
	if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
	    (SCTP_SOCKET_TCP == sp->type) && (SCTP_SS_CLOSED == sk->state))
		sk->state = SCTP_SS_ESTABLISHED;
}

/* This macro is to compress the text a bit...  */
#define AP(v) asoc->peer.v

/* Generate SACK if necessary.  We call this at the end of a packet.  */
int sctp_gen_sack(sctp_association_t *asoc, int force,
		  sctp_cmd_seq_t *commands)
{
	__u32 ctsn, max_tsn_seen;
	sctp_chunk_t *sack;
	int error = 0;

	if (force)
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *	     : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed.  Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		/* We will need a SACK for the next packet.  */
		asoc->peer.sack_needed = 1;
		goto out;
	} else {
		if (asoc->a_rwnd > asoc->rwnd)
			asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack)
			goto nomem;

		asoc->peer.sack_needed = 0;

		error = sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

out:
	return error;

nomem:
	error = -ENOMEM;
	return error;
}

/* Handle a duplicate TSN.  */
void sctp_do_TSNdup(sctp_association_t *asoc, sctp_chunk_t *chunk, long gap)
{
#if 0
	sctp_chunk_t *sack;

	/* Caution:  gap < 2 * SCTP_TSN_MAP_SIZE
	 *	     so gap can be negative.
	 *
	 *	     --xguo
	 */

	/* Count this TSN.  */
	if (gap < SCTP_TSN_MAP_SIZE) {
		asoc->peer.tsn_map[gap]++;
	} else {
		asoc->peer.tsn_map_overflow[gap - SCTP_TSN_MAP_SIZE]++;
	}

	/* From 6.2 Acknowledgement on Reception of DATA Chunks
	 *
	 * When a packet arrives with duplicate DATA chunk(s)
	 * and with no new DATA chunk(s), the endpoint MUST
	 * immediately send a SACK with no delay.  If a packet
	 * arrives with duplicate DATA chunk(s) bundled with
	 * new DATA chunks, the endpoint MAY immediately send a
	 * SACK.  Normally receipt of duplicate DATA chunks
	 * will occur when the original SACK chunk was lost and
	 * the peer's RTO has expired.  The duplicate TSN
	 * number(s) SHOULD be reported in the SACK as
	 * duplicate.
	 */
	asoc->counters[SctpCounterAckState] = 2;
#endif /* 0 */
} /* sctp_do_TSNdup() */

#undef AP
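sctp_gen_sack() above is the RFC 2960 6.2 delayed-acknowledgement rule in code: out-of-order arrivals force an immediate SACK, otherwise the first unacknowledged packet only arms the "SACK next time" flag and the second one actually emits the chunk and stops the SACK timer.  Boiled down to its skeleton (send_sack() and stop_sack_timer() are illustrative stand-ins, not functions in this file):

	/* Sketch of the every-second-packet rule implemented above. */
	if (max_tsn_seen != ctsn)
		sack_needed = 1;	/* out of order: do not delay    */

	if (!sack_needed) {
		sack_needed = 1;	/* 1st packet: SACK the next one */
	} else {
		send_sack();		/* 2nd packet: SACK immediately  */
		sack_needed = 0;
		stop_sack_timer();	/* delayed SACK is now moot      */
	}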
/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define DEBUG_PRE \
	SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
			  "ep %p, %s, %s, asoc %p[%s], %s\n", \
			  ep, sctp_evttype_tbl[event_type], \
			  (*debug_fn)(subtype), asoc, \
			  sctp_state_tbl[state], state_fn->name)

#define DEBUG_POST \
	SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
			  "asoc %p, status: %s\n", \
			  asoc, sctp_status_tbl[status])

#define DEBUG_POST_SFX \
	SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
			  error, asoc, \
			  sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
			  sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
	       sctp_state_t state,
	       sctp_endpoint_t *ep,
	       sctp_association_t *asoc,
	       void *event_arg,
	       int priority)
{
	sctp_cmd_seq_t commands;
	sctp_sm_table_entry_t *state_fn;
	sctp_disposition_t status;
	int error = 0;
	typedef const char *(printfn_t)(sctp_subtype_t);

	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	DEBUG_PRE;
	status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
	DEBUG_POST;

	error = sctp_side_effects(event_type, subtype, state,
				  ep, asoc, event_arg,
				  status, &commands,
				  priority);
	DEBUG_POST_SFX;

	return error;
}

#undef DEBUG_PRE
#undef DEBUG_POST
/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
		      sctp_state_t state,
		      sctp_endpoint_t *ep,
		      sctp_association_t *asoc,
		      void *event_arg,
		      sctp_disposition_t status,
		      sctp_cmd_seq_t *commands,
		      int priority)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, asoc,
					       event_arg, status,
					       commands, priority)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, "
				  "event_type %d, event_id %d\n",
				  state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
		/* This should now be a command.  */
		break;

	case SCTP_DISPOSITION_CONSUME:
	case SCTP_DISPOSITION_ABORT:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		printk(KERN_ERR "sctp protocol violation state %d "
		       "chunkid %d\n", state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		printk(KERN_WARNING "sctp unimplemented feature in state %d, "
		       "event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		printk(KERN_ERR "sctp bug in state %d, "
		       "event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		printk(KERN_ERR "sctp impossible disposition %d "
		       "in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	};

bail:
	return error;
}
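Everything in sctp_do_sm() hangs off one table lookup: sctp_sm_lookup_event() maps the (event type, state, subtype) triple to a single state function, that function appends commands, and sctp_side_effects() feeds them to the interpreter below.  The shape of such a dispatcher, reduced to a self-contained sketch (the two-dimensional table, its bounds and types here are hypothetical, not lksctp's real lookup):

	/* Sketch: table-driven (event x state) dispatch. */
	enum { EV_MAX = 4, ST_MAX = 8 };

	typedef int (*handler_fn)(void *event_arg);

	static handler_fn dispatch_tbl[EV_MAX][ST_MAX];	/* filled elsewhere */

	static int do_sm_sketch(int event, int state, void *event_arg)
	{
		handler_fn fn = dispatch_tbl[event][state];

		/* Run the state function, then interpret side effects. */
		return fn ? fn(event_arg) : -EINVAL;
	}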
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter.  */
int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			 sctp_state_t state, sctp_endpoint_t *ep,
			 sctp_association_t *asoc, void *event_arg,
			 sctp_disposition_t status, sctp_cmd_seq_t *commands,
			 int priority)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	sctp_chunk_t *new_obj;
	sctp_chunk_t *chunk = NULL;
	struct sctp_packet *packet;
	struct list_head *pos;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	sctp_sackhdr_t sackh;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = (sctp_chunk_t *) event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing.  */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			asoc = cmd->obj.ptr;
			/* Register with the endpoint.  */
			sctp_endpoint_add_asoc(ep, asoc);
			sctp_hash_established(asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			/* Delete the current association.  */
			sctp_unhash_established(asoc);
			sctp_association_free(asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.ptr);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.ptr, priority);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.ptr)
					sctp_free_chunk(cmd->obj.ptr);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.ptr)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.ptr));
			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "chunk_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "event_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
			break;

		case SCTP_CMD_REPLY:
			/* Send a chunk to our peer.  */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.  */
			packet = cmd->obj.ptr;
			sctp_packet_transmit(packet);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_TRANSMIT:
			/* Kick start transmission.  */
			error = sctp_outq_flush(&asoc->outqueue, 0);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing.  */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing.  */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing.  */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!timeout)
				BUG();

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (timer_pending(timer) && del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.
			 */
			asoc->counters[SCTP_COUNTER_INIT_ERROR]++;
			asoc->timeouts[cmd->obj.to] *= 2;
			if (asoc->timeouts[cmd->obj.to] >
			    asoc->max_init_timeo) {
				asoc->timeouts[cmd->obj.to] =
					asoc->max_init_timeo;
			}

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each(pos, &asoc->peer.transport_addr_list) {
				t = list_entry(pos, struct sctp_transport,
					       transports);
				sctp_retransmit_mark(&asoc->outqueue, t, 0);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(cmd->obj.to));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk);
			break;

		case SCTP_CMD_COUNTER_INC:
			asoc->counters[cmd->obj.counter]++;
			break;

		case SCTP_CMD_COUNTER_RESET:
			asoc->counters[cmd->obj.counter] = 0;
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			SCTP_DEBUG_PRINTK("vtag mismatch!\n");
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport.  */
			sctp_do_8_2_transport_strike(asoc,
						     cmd->obj.transport);
			break;

		case SCTP_CMD_TRANSPORT_RESET:
			t = cmd->obj.transport;
			sctp_cmd_transport_reset(commands, asoc, t);
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_cmd_hb_timer_update(commands, asoc, t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing.  */
			sackh.cum_tsn_ack = cmd->obj.u32;
			sackh.a_rwnd = 0;
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_SACKH(&sackh));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.  */
			chunk->pdiscard = 1;
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
						   GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
					 GFP_ATOMIC);
			break;

		default:
			printk(KERN_WARNING "Impossible command: %u, %p\n",
			       cmd->verb, cmd->obj.ptr);
			break;
		};
		if (error)
			return error;
	}

	return error;

nomem:
	error = -ENOMEM;
	return error;
}
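The --jgrimm note at the top of the interpreter sketches its own rework: give each verb a handler and replace the big switch with an indexed call.  What that loop could look like, as a hypothetical sketch only (cmd_handler_t, cmd_tbl and SCTP_CMD_NUM do not exist in this commit):

	/* Sketch: per-verb handler table instead of the switch. */
	typedef int (*cmd_handler_t)(sctp_cmd_t *cmd,
				     sctp_association_t *asoc);

	static cmd_handler_t cmd_tbl[SCTP_CMD_NUM];	/* hypothetical */

	while (NULL != (cmd = sctp_next_cmd(commands))) {
		error = cmd_tbl[cmd->verb](cmd, asoc);
		if (error)
			break;
	}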