linux / commit beca2c15

Authored Apr 28, 2003 by Jon Grimm

    Merge http://linux-lksctp.bkbits.net/lksctp-2.5.work
    into touki.austin.ibm.com:/home/jgrimm/bk/lksctp-2.5.work

Parents: 061a9a85 3db0b603

Showing 5 changed files with 57 additions and 271 deletions (+57, -271)
include/net/sctp/structs.h   +3   -2
net/sctp/chunk.c             +37  -2
net/sctp/output.c            +9   -15
net/sctp/outqueue.c          +7   -252
net/sctp/socket.c            +1   -0
include/net/sctp/structs.h

@@ -459,8 +459,9 @@ struct sctp_datamsg {
    struct list_head track;
    /* Reference counting. */
    atomic_t refcnt;
    /* Have the SEND_FAILED notifications been done. */
    __u8 notify_done;
    /* Did the messenge fail to send? */
    int send_error;
    char send_failed;
};

struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
...
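Editor's note: the hunk above drops the notify_done flag in favour of a pair of fields that record a failure where it is detected and defer the SEND_FAILED notification until the last reference to the message is dropped (see the sctp_datamsg_destroy hunk below). The following standalone C program is only a sketch of that pattern; the names and the error value are hypothetical and it is not kernel code.

    /* Model of a refcounted message with deferred failure notification. */
    #include <stdio.h>

    struct datamsg {
        int refcnt;
        int send_error;   /* why the message failed (0 = no error) */
        char send_failed; /* did any fragment of the message fail? */
    };

    static void datamsg_put(struct datamsg *msg)
    {
        if (--msg->refcnt)
            return;
        /* Last reference gone: decide once whether to notify. */
        if (msg->send_failed)
            printf("SEND_FAILED, error=%d\n", msg->send_error);
    }

    int main(void)
    {
        struct datamsg msg = { .refcnt = 2, .send_error = 0, .send_failed = 0 };

        /* A fragment could not be delivered: record the failure... */
        msg.send_failed = 1;
        msg.send_error = 22;    /* hypothetical error code */

        /* ...but the notification fires only when the last ref goes away. */
        datamsg_put(&msg);      /* nothing printed yet */
        datamsg_put(&msg);      /* prints the failure exactly once */
        return 0;
    }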
net/sctp/chunk.c

@@ -53,7 +53,8 @@
void sctp_datamsg_init(struct sctp_datamsg *msg)
{
    atomic_set(&msg->refcnt, 1);
    msg->notify_done = 0;
    msg->send_failed = 0;
    msg->send_error = 0;
    INIT_LIST_HEAD(&msg->chunks);
    INIT_LIST_HEAD(&msg->track);
}
...
@@ -74,11 +75,45 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
    struct list_head *pos, *temp;
    struct sctp_chunk *chunk;
    struct sctp_opt *sp;
    struct sctp_ulpevent *ev;
    struct sctp_association *asoc;
    int error, notify;

    /* Release all references, if there are any left. */
    /* If we failed, we may need to notify. */
    notify = msg->send_failed ? -1 : 0;

    /* Release all references. */
    list_for_each_safe(pos, temp, &msg->track) {
        list_del(pos);
        chunk = list_entry(pos, struct sctp_chunk, frag_list);

        /* Check whether we _really_ need to notify. */
        if (notify < 0) {
            asoc = chunk->asoc;
            if (msg->send_error)
                error = msg->send_error;
            else
                error = asoc->outqueue.error;

            sp = sctp_sk(asoc->base.sk);
            notify = sctp_ulpevent_type_enabled(SCTP_SEND_FAILED,
                                                &sp->subscribe);
        }

        /* Generate a SEND FAILED event only if enabled. */
        if (notify > 0) {
            int sent;
            if (chunk->has_tsn)
                sent = SCTP_DATA_SENT;
            else
                sent = SCTP_DATA_UNSENT;

            ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
                                                error, GFP_ATOMIC);
            if (ev)
                sctp_ulpq_tail_event(&asoc->ulpq, ev);
        }

        sctp_chunk_put(chunk);
    }
...
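Editor's note: sctp_datamsg_destroy() emits the event only when the socket has subscribed to it, via sctp_ulpevent_type_enabled(SCTP_SEND_FAILED, &sp->subscribe). From user space that subscription is made with the SCTP_EVENTS socket option; the sketch below uses the later lksctp sockets API spellings (struct sctp_event_subscribe, sctp_send_failure_event), which may differ slightly from the 2.5-era headers.

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    /* Ask the kernel to deliver SCTP_SEND_FAILED notifications on sk. */
    static int enable_send_failed_events(int sk)
    {
        struct sctp_event_subscribe events;

        memset(&events, 0, sizeof(events));
        events.sctp_send_failure_event = 1;

        return setsockopt(sk, IPPROTO_SCTP, SCTP_EVENTS,
                          &events, sizeof(events));
    }

Once enabled, the failure arrives as a struct sctp_send_failed notification whose flags carry SCTP_DATA_SENT or SCTP_DATA_UNSENT, matching the sent value chosen in the hunk above.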
net/sctp/output.c

@@ -219,21 +219,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
        /* Both control chunks and data chunks with TSNs are
         * non-fragmentable.
         */
        int fragmentable = sctp_chunk_is_data(chunk) && (!chunk->has_tsn);
        if (packet_empty) {
            if (fragmentable) {
                retval = SCTP_XMIT_MUST_FRAG;
                goto finish;
            } else {
                /* The packet is too big but we can
                 * not fragment it--we have to just
                 * transmit and rely on IP
                 * fragmentation.
                 */
                packet->ipfragok = 1;
                goto append;
            }
            /* We no longer do refragmentation at all.
             * Just fragment at the IP layer, if we
             * actually hit this condition
             */
            packet->ipfragok = 1;
            goto append;
        } else { /* !packet_empty */
            retval = SCTP_XMIT_PMTU_FULL;
            goto finish;
...

@@ -374,7 +369,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
                chunk->rtt_in_progress = 1;
                tp->rto_pending = 1;
            }
            sctp_datamsg_track(chunk);
        } else
            chunk->resent = 1;
...
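Editor's note: the first hunk above changes what happens when a DATA chunk does not fit in the current PMTU. Instead of returning SCTP_XMIT_MUST_FRAG so the outqueue refragments the chunk, the chunk is appended with packet->ipfragok set and any needed fragmentation is left to the IP layer. The toy program below only models that decision for an oversized chunk; it is not kernel code.

    #include <stdio.h>

    enum xmit { XMIT_APPEND_IPFRAG, XMIT_PMTU_FULL };

    /* Hypothetical model of the post-merge behaviour for an oversized chunk. */
    static enum xmit append_oversized(int packet_empty)
    {
        if (packet_empty)
            return XMIT_APPEND_IPFRAG; /* append; rely on IP fragmentation */
        return XMIT_PMTU_FULL;         /* retry on a fresh, empty packet   */
    }

    int main(void)
    {
        printf("empty packet  -> %d (append, ipfragok)\n", append_oversized(1));
        printf("packet in use -> %d (PMTU full)\n", append_oversized(0));
        return 0;
    }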
net/sctp/outqueue.c

@@ -243,7 +243,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
    struct sctp_transport *transport;
    struct list_head *lchunk, *pos, *temp;
    struct sctp_chunk *chunk;
    struct sctp_ulpevent *ev;

    /* Throw away unacknowledged chunks. */
    list_for_each(pos, &q->asoc->peer.transport_addr_list) {
...
@@ -251,14 +250,9 @@ void sctp_outq_teardown(struct sctp_outq *q)
        while ((lchunk = sctp_list_dequeue(&transport->transmitted))) {
            chunk = list_entry(lchunk, struct sctp_chunk,
                               transmitted_list);

            /* Mark as part of a failed message. */
            chunk->msg->send_failed = 1;

            /* Generate a SEND FAILED event. */
            ev = sctp_ulpevent_make_send_failed(q->asoc, chunk,
                        SCTP_DATA_SENT, q->error, GFP_ATOMIC);
            if (ev)
                sctp_ulpq_tail_event(&q->asoc->ulpq, ev);

            sctp_chunk_free(chunk);
        }
    }
...
@@ -282,13 +276,8 @@ void sctp_outq_teardown(struct sctp_outq *q)
    /* Throw away any leftover data chunks. */
    while ((chunk = sctp_outq_dequeue_data(q))) {

        /* Generate a SEND FAILED event. */
        ev = sctp_ulpevent_make_send_failed(q->asoc, chunk,
                    SCTP_DATA_UNSENT, q->error, GFP_ATOMIC);
        if (ev)
            sctp_ulpq_tail_event(&q->asoc->ulpq, ev);

        /* Mark as send failure. */
        chunk->msg->send_failed = 1;
        sctp_chunk_free(chunk);
    }
...
@@ -609,215 +598,6 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
    return error;
}

/* This routine either transmits the fragment or puts it on the output
 * queue.  'pos' points to the next chunk in the output queue after the
 * chunk that is currently in the process of fragmentation.
 */
void sctp_xmit_frag(struct sctp_outq *q, struct sctp_chunk *pos,
                    struct sctp_packet *packet,
                    struct sctp_chunk *frag, __u32 tsn)
{
    struct sctp_transport *transport = packet->transport;
    struct sk_buff_head *queue = &q->out;
    sctp_xmit_t status;
    int error;

    frag->subh.data_hdr->tsn = htonl(tsn);
    frag->has_tsn = 1;

    /* An inner fragment may be smaller than the earlier one and may get
     * in if we call q->build_output. This ensures that all the fragments
     * are sent in order.
     */
    if (!skb_queue_empty(queue)) {
        SCTP_DEBUG_PRINTK("sctp_xmit_frag: q not empty. "
                          "adding 0x%x to outqueue\n",
                          ntohl(frag->subh.data_hdr->tsn));
        if (pos)
            sctp_outq_insert_data(q, frag, pos);
        else
            sctp_outq_tail_data(q, frag);
        return;
    }

    /* Add the chunk fragment to the packet. */
    status = (*q->build_output)(packet, frag);

    switch (status) {
    case SCTP_XMIT_RWND_FULL:
        /* RWND is full, so put the chunk in the output queue. */
        SCTP_DEBUG_PRINTK("sctp_xmit_frag: rwnd full. "
                          "adding 0x%x to outqueue\n",
                          ntohl(frag->subh.data_hdr->tsn));
        if (pos)
            sctp_outq_insert_data(q, frag, pos);
        else
            sctp_outq_tail_data(q, frag);
        break;

    case SCTP_XMIT_OK:
        error = (*q->force_output)(packet);
        if (error < 0) {
            /* Packet could not be transmitted, put the chunk in
             * the output queue
             */
            SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output "
                              "failed. adding 0x%x to outqueue\n",
                              ntohl(frag->subh.data_hdr->tsn));
            if (pos)
                sctp_outq_insert_data(q, frag, pos);
            else
                sctp_outq_tail_data(q, frag);
        } else {
            SCTP_DEBUG_PRINTK("sctp_xmit_frag: force output "
                              "success. 0x%x sent\n",
                              ntohl(frag->subh.data_hdr->tsn));
            list_add_tail(&frag->transmitted_list,
                          &transport->transmitted);
            sctp_transport_reset_timers(transport);
        }
        break;

    default:
        BUG();
    };
}

/* This routine calls sctp_xmit_frag() for all the fragments of a message.
 * The argument 'frag' point to the first fragment and it holds the list
 * of all the other fragments in the 'frag_list' field.
 */
void sctp_xmit_fragmented_chunks(struct sctp_outq *q, struct sctp_packet *pkt,
                                 struct sctp_chunk *frag)
{
    struct sctp_association *asoc = frag->asoc;
    struct list_head *lfrag, *frag_list;
    __u32 tsn;
    int nfrags = 1;
    struct sctp_chunk *pos;

    /* Count the number of fragments. */
    frag_list = &frag->frag_list;
    list_for_each(lfrag, frag_list) {
        nfrags++;
    }

    /* Get a TSN block of nfrags TSNs. */
    tsn = sctp_association_get_tsn_block(asoc, nfrags);

    pos = (struct sctp_chunk *)skb_peek(&q->out);

    /* Transmit the first fragment. */
    sctp_xmit_frag(q, pos, pkt, frag, tsn++);

    /* Transmit the rest of fragments. */
    frag_list = &frag->frag_list;
    list_for_each(lfrag, frag_list) {
        frag = list_entry(lfrag, struct sctp_chunk, frag_list);
        sctp_xmit_frag(q, pos, pkt, frag, tsn++);
    }
}

/* This routine breaks the given chunk into 'max_frag_data_len' size
 * fragments.  It returns the first fragment with the frag_list field holding
 * the remaining fragments.
 */
struct sctp_chunk *sctp_fragment_chunk(struct sctp_chunk *chunk,
                                       size_t max_frag_data_len)
{
    struct sctp_association *asoc = chunk->asoc;
    void *data_ptr = chunk->subh.data_hdr;
    struct sctp_sndrcvinfo *sinfo = &chunk->sinfo;
    __u16 chunk_data_len = sctp_data_size(chunk);
    __u16 ssn = ntohs(chunk->subh.data_hdr->ssn);
    struct sctp_chunk *first_frag, *frag;
    struct list_head *frag_list;
    int nfrags;
    __u8 old_flags, flags;

    /* nfrags = no. of max size fragments + any smaller last fragment. */
    nfrags = ((chunk_data_len / max_frag_data_len) +
              ((chunk_data_len % max_frag_data_len) ? 1 : 0));

    /* Start of the data in the chunk. */
    data_ptr += sizeof(sctp_datahdr_t);

    /* Are we fragmenting an already fragmented large message? */
    old_flags = chunk->chunk_hdr->flags;
    if (old_flags & SCTP_DATA_FIRST_FRAG)
        flags = SCTP_DATA_FIRST_FRAG;
    else
        flags = SCTP_DATA_MIDDLE_FRAG;

    /* Make the first fragment. */
    first_frag = sctp_make_datafrag(asoc, sinfo, max_frag_data_len,
                                    data_ptr, flags, ssn);
    if (!first_frag)
        goto err;
    sctp_datamsg_assign(chunk->msg, first_frag);

    first_frag->has_ssn = 1;

    /* All the fragments are added to the frag_list of the first chunk. */
    frag_list = &first_frag->frag_list;

    chunk_data_len -= max_frag_data_len;
    data_ptr += max_frag_data_len;

    /* Make the middle fragments. */
    while (chunk_data_len > max_frag_data_len) {
        frag = sctp_make_datafrag(asoc, sinfo, max_frag_data_len,
                                  data_ptr, SCTP_DATA_MIDDLE_FRAG, ssn);
        if (!frag)
            goto err;
        sctp_datamsg_assign(chunk->msg, frag);
        frag->has_ssn = 1;
        /* Add the middle fragment to the first fragment's
         * frag_list.
         */
        list_add_tail(&frag->frag_list, frag_list);

        chunk_data_len -= max_frag_data_len;
        data_ptr += max_frag_data_len;
    }

    if (old_flags & SCTP_DATA_LAST_FRAG)
        flags = SCTP_DATA_LAST_FRAG;
    else
        flags = SCTP_DATA_MIDDLE_FRAG;

    /* Make the last fragment. */
    frag = sctp_make_datafrag(asoc, sinfo, chunk_data_len, data_ptr,
                              flags, ssn);
    if (!frag)
        goto err;
    sctp_datamsg_assign(chunk->msg, frag);
    frag->has_ssn = 1;

    /* Add the last fragment to the first fragment's frag_list. */
    list_add_tail(&frag->frag_list, frag_list);

    /* Free the original chunk. */
    sctp_chunk_free(chunk);

    return first_frag;

err:
    /* Free any fragments that are created before the failure. */
    if (first_frag) {
        struct list_head *flist, *lfrag;

        /* Free all the fragments off the first one. */
        flist = &first_frag->frag_list;
        while (NULL != (lfrag = sctp_list_dequeue(flist))) {
            frag = list_entry(lfrag, struct sctp_chunk, frag_list);
            sctp_chunk_free(frag);
        }

        /* Free the first fragment. */
        sctp_chunk_free(first_frag);
    }

    return NULL;
}

/* Cork the outqueue so queued chunks are really queued. */
int sctp_outq_uncork(struct sctp_outq *q)
{
...
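Editor's note: the removed sctp_fragment_chunk() computed the fragment count by plain ceiling division. A small worked example of that arithmetic, with hypothetical sizes, is below; it is not kernel code.

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk_data_len = 4000;     /* hypothetical payload size   */
        unsigned int max_frag_data_len = 1452;  /* hypothetical fragment limit */
        unsigned int nfrags;

        /* 4000 / 1452 = 2, remainder 1096, so one extra fragment is needed. */
        nfrags = chunk_data_len / max_frag_data_len +
                 (chunk_data_len % max_frag_data_len ? 1 : 0);

        printf("nfrags = %u\n", nfrags);        /* prints: nfrags = 3 */
        return 0;
    }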
@@ -1020,14 +800,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
             */
            if (chunk->sinfo.sinfo_stream >=
                asoc->c.sinit_num_ostreams) {
                struct sctp_ulpevent *ev;

                /* Generate a SEND FAILED event. */
                ev = sctp_ulpevent_make_send_failed(asoc, chunk,
                                SCTP_DATA_UNSENT,
                                SCTP_ERROR_INV_STRM,
                                GFP_ATOMIC);
                if (ev)
                    sctp_ulpq_tail_event(&asoc->ulpq, ev);

                /* Mark as failed send. */
                chunk->msg->send_failed = 1;
                chunk->msg->send_error = SCTP_ERROR_INV_STRM;

                /* Free the chunk. */
                sctp_chunk_free(chunk);
...
@@ -1097,26 +872,6 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                goto sctp_flush_out;
                break;

            case SCTP_XMIT_MUST_FRAG: {
                struct sctp_chunk *frag;

                frag = sctp_fragment_chunk(chunk,
                        packet->transport->asoc->frag_point);
                if (!frag) {
                    /* We could not fragment due to out of
                     * memory condition. Free the original
                     * chunk and return ENOMEM.
                     */
                    sctp_chunk_free(chunk);
                    error = -ENOMEM;
                    return error;
                }
                sctp_xmit_fragmented_chunks(q, packet, frag);
                goto sctp_flush_out;
                break;
            }

            case SCTP_XMIT_OK:
                break;
...
net/sctp/socket.c

@@ -1092,6 +1092,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
    list_for_each_safe(pos, temp, &datamsg->chunks) {
        chunk = list_entry(pos, struct sctp_chunk, frag_list);
        list_del_init(pos);
        sctp_datamsg_track(chunk);

        /* Do accounting for the write space. */
        sctp_set_owner_w(chunk);
...