Commit 5a470b1a, authored Mar 30, 2020 by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Parents: 3902baf9 01413176

Showing 12 changed files with 115 additions and 61 deletions (+115 / -61)
drivers/net/ethernet/hisilicon/hns3/hnae3.h                 +1  -0
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c             +13 -5
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c     +8  -2
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c   +29 -25
net/core/skbuff.c                                           +1  -0
net/ipv4/fib_trie.c                                         +3  -0
net/ipv4/ip_tunnel.c                                        +1  -5
net/ipv4/udp_offload.c                                      +1  -0
net/mac80211/tx.c                                           +2  -1
net/sctp/ipv6.c                                             +14 -6
net/sctp/protocol.c                                         +19 -9
net/sctp/socket.c                                           +23 -8
drivers/net/ethernet/hisilicon/hns3/hnae3.h

@@ -78,6 +78,7 @@
 enum hns_desc_type {
 	DESC_TYPE_SKB,
+	DESC_TYPE_FRAGLIST_SKB,
 	DESC_TYPE_PAGE,
 };
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c

@@ -1106,6 +1106,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		if (unlikely(ret < 0))
 			return ret;
 
 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	} else if (type == DESC_TYPE_FRAGLIST_SKB) {
+		struct sk_buff *skb = (struct sk_buff *)priv;
+
+		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 	} else {
 		frag = (skb_frag_t *)priv;
@@ -1144,8 +1148,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
 		desc_cb->priv = priv;
 		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
-		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
-				DESC_TYPE_SKB : DESC_TYPE_PAGE;
+		desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
+				  type == DESC_TYPE_SKB) && !k) ?
+				type : DESC_TYPE_PAGE;
 
 		/* now, fill the descriptor */
 		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
@@ -1354,7 +1359,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
 		ring_ptr_move_bw(ring, next_to_use);
 
 		/* unmap the descriptor dma address */
-		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
+		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
+		    ring->desc_cb[ring->next_to_use].type ==
+		    DESC_TYPE_FRAGLIST_SKB)
 			dma_unmap_single(dev,
 					 ring->desc_cb[ring->next_to_use].dma,
 					 ring->desc_cb[ring->next_to_use].length,
@@ -1447,7 +1454,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 		goto out;
 
 	skb_walk_frags(skb, frag_skb) {
-		ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+		ret = hns3_fill_skb_to_desc(ring, frag_skb,
+					    DESC_TYPE_FRAGLIST_SKB);
 		if (unlikely(ret < 0))
 			goto fill_err;
@@ -2356,7 +2364,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
 			      struct hns3_desc_cb *cb)
 {
-	if (cb->type == DESC_TYPE_SKB)
+	if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
 	else if (cb->length)
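My reading of these hns3 hunks, not taken from the commit message: frag-list skbs get their own descriptor type so that the linear area of each fragment skb is DMA-mapped with dma_map_single() and later unmapped the same way, instead of being treated like a page fragment. A minimal sketch of the transmit-side walk this relies on; the ring type, descriptor enum and fill_one() helper below are hypothetical stand-ins, not the hns3 API:

/* Hypothetical sketch, not the hns3 code: walk a frag-list skb and tag
 * each fragment head with its own descriptor type so the unmap path can
 * tell it apart from page fragments.
 */
#include <linux/skbuff.h>

struct my_ring;					/* stand-in for struct hns3_enet_ring */
enum my_desc_type { MY_DESC_SKB, MY_DESC_FRAGLIST_SKB, MY_DESC_PAGE };

static int my_fill_fraglist(struct my_ring *ring, struct sk_buff *skb,
			    int (*fill_one)(struct my_ring *, struct sk_buff *,
					    enum my_desc_type))
{
	struct sk_buff *frag_skb;
	int ret;

	/* frag_list members are full skbs; their linear data is mapped
	 * like a head skb (dma_map_single), not like a page frag.
	 */
	skb_walk_frags(skb, frag_skb) {
		ret = fill_one(ring, frag_skb, MY_DESC_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			return ret;
	}
	return 0;
}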
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c

@@ -6768,7 +6768,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
 	struct hclge_dev *hdev = vport->back;
 
 	if (enable) {
-		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+		hclge_task_schedule(hdev, 0);
 	} else {
 		/* Set the DOWN flag here to disable link updating */
 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -8986,6 +8986,12 @@ static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 
+	/* When nic is down, the service task is not running, doesn't update
+	 * the port information per second. Query the port information before
+	 * return the media type, ensure getting the correct media information.
+	 */
+	hclge_update_port_info(hdev);
+
 	if (media_type)
 		*media_type = hdev->hw.mac.media_type;
@@ -10674,7 +10680,7 @@ static int hclge_init(void)
 {
 	pr_info("%s is initializing\n", HCLGE_NAME);
 
-	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
 	if (!hclge_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
 		return -ENOMEM;
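For context (my reading, not part of the commit): alloc_workqueue()'s second argument is the WQ_* flag mask and the third is max_active. Dropping WQ_MEM_RECLAIM means this queue no longer claims it can make forward progress under memory pressure, which, as far as I can tell, also avoids the flush-dependency warning the workqueue core raises when a WQ_MEM_RECLAIM queue ends up flushing work on an ordinary queue. A minimal, hypothetical sketch of the call pattern; the names are stand-ins:

/* Hypothetical sketch of the alloc_workqueue() call pattern, not the
 * hns3 code.  Passing 0 for the flags creates an ordinary
 * (non-memory-reclaim) workqueue.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_wq_setup(void)
{
	my_wq = alloc_workqueue("%s", 0 /* no WQ_MEM_RECLAIM */, 0, "my_drv");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_wq_teardown(void)
{
	destroy_workqueue(my_wq);
}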
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c

@@ -2149,50 +2149,51 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
 	return ret;
 }
 
-static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
 {
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	int ret;
+	struct hclgevf_rss_tuple_cfg *tuple_sets;
+	u32 i;
 
+	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
 	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
+	tuple_sets = &rss_cfg->rss_tuple_sets;
 	if (hdev->pdev->revision >= 0x21) {
 		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
 		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
 		       HCLGEVF_RSS_KEY_SIZE);
+
+		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+	}
+
+	/* Initialize RSS indirect table */
+	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int ret;
+
+	if (hdev->pdev->revision >= 0x21) {
 		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
 					       rss_cfg->rss_hash_key);
 		if (ret)
 			return ret;
 
-		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv4_udp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
-		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_udp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
-					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
-		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
-					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-
 		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
 		if (ret)
 			return ret;
 	}
 
-	/* Initialize RSS indirect table */
-	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
-		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
-
 	ret = hclgevf_set_rss_indir_table(hdev);
 	if (ret)
 		return ret;
@@ -2793,6 +2794,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 
 	/* Initialize RSS for this VF */
+	hclgevf_rss_init_cfg(hdev);
 	ret = hclgevf_rss_init_hw(hdev);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
@@ -2967,6 +2969,8 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 		rss_indir[i] = i % kinfo->rss_size;
 
+	hdev->rss_cfg.rss_size = kinfo->rss_size;
+
 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
 	if (ret)
 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
@@ -3220,7 +3224,7 @@ static int hclgevf_init(void)
 {
 	pr_info("%s is initializing\n", HCLGEVF_NAME);
 
-	hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
 	if (!hclgevf_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
 		return -ENOMEM;
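How I read the hclgevf change, not quoting the commit message: the old hclgevf_rss_init_hw() both computed the software RSS defaults and programmed them into hardware, so re-running it (e.g. after a reset) would wipe settings the user had changed. The split moves the software defaults into hclgevf_rss_init_cfg(), called once from hclgevf_init_hdev(), while hclgevf_rss_init_hw() only pushes whatever is currently in rss_cfg to the hardware; hclgevf_set_channels() additionally keeps rss_cfg.rss_size in sync. A small, hypothetical plain-C sketch of that probe/re-init split; none of these names are the hclgevf API:

/* Hypothetical sketch: compute defaults once at probe, push the current
 * config to hardware on every (re)init without overwriting it.
 */
#include <string.h>

struct my_rss_cfg {
	unsigned int rss_size;
	unsigned char indir[64];
};

static void my_rss_init_cfg(struct my_rss_cfg *cfg, unsigned int rss_size)
{
	unsigned int i;

	cfg->rss_size = rss_size;
	for (i = 0; i < sizeof(cfg->indir); i++)
		cfg->indir[i] = i % cfg->rss_size;	/* default spread */
}

static int my_rss_write_hw(const struct my_rss_cfg *cfg)
{
	(void)cfg;		/* stand-in for the register writes */
	return 0;
}

static int my_rss_init_hw(const struct my_rss_cfg *cfg)
{
	/* Safe to call again after a reset: it only replays *cfg. */
	return my_rss_write_hw(cfg);
}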
net/core/skbuff.c

@@ -3668,6 +3668,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
 		skb_push(nskb, -skb_network_offset(nskb) + offset);
 
+		skb_release_head_state(nskb);
 		__copy_skb_header(nskb, skb);
 
 		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
net/ipv4/fib_trie.c

@@ -2572,6 +2572,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 		   " %zd bytes, size of tnode: %zd bytes.\n",
 		   LEAF_SIZE, TNODE_SIZE(0));
 
+	rcu_read_lock();
 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
 		struct fib_table *tb;
@@ -2591,7 +2592,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 			trie_show_usage(seq, t->stats);
 #endif
 		}
+		cond_resched_rcu();
 	}
+	rcu_read_unlock();
 
 	return 0;
 }
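My reading of this hunk: the hash-table walk uses RCU-protected list traversal and therefore needs an explicit read-side critical section, with cond_resched_rcu() briefly dropping and retaking it between buckets so a long statistics dump cannot monopolise the CPU. A minimal, hypothetical sketch of that pattern, with stand-in types rather than the fib_trie ones:

/* Hypothetical sketch of the RCU walk pattern, not the fib_trie code. */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/sched.h>

struct my_item {
	struct hlist_node node;
	int val;
};

static int my_dump(struct hlist_head *table, unsigned int buckets)
{
	struct my_item *it;
	unsigned int h;
	int sum = 0;

	rcu_read_lock();
	for (h = 0; h < buckets; h++) {
		hlist_for_each_entry_rcu(it, &table[h], node)
			sum += it->val;
		cond_resched_rcu();	/* unlock, maybe reschedule, relock */
	}
	rcu_read_unlock();
	return sum;
}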
net/ipv4/ip_tunnel.c

@@ -142,11 +142,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 			cand = t;
 	}
 
-	if (flags & TUNNEL_NO_KEY)
-		goto skip_key_lookup;
-
 	hlist_for_each_entry_rcu(t, head, hash_node) {
-		if (t->parms.i_key != key ||
+		if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
 		    t->parms.iph.saddr != 0 ||
 		    t->parms.iph.daddr != 0 ||
 		    !(t->dev->flags & IFF_UP))
@@ -158,7 +155,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 			cand = t;
 	}
 
-skip_key_lookup:
 	if (cand)
 		return cand;
net/ipv4/udp_offload.c

@@ -453,6 +453,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
 
+	NAPI_GRO_CB(skb)->is_flist = 0;
 	if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
 		NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
net/mac80211/tx.c

@@ -3611,7 +3611,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 	 * Drop unicast frames to unauthorised stations unless they are
 	 * EAPOL frames from the local station.
 	 */
-	if (unlikely(!ieee80211_vif_is_mesh(&tx.sdata->vif) &&
+	if (unlikely(ieee80211_is_data(hdr->frame_control) &&
+		     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
 		     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
 		     !is_multicast_ether_addr(hdr->addr1) &&
 		     !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
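As I read this hunk, the extra ieee80211_is_data() test narrows the port-authorization drop to data frames, so management and other non-data frames queued for an unauthorized station are no longer discarded by this path. A tiny, hypothetical sketch of gating a policy check on the frame type; the helper below is an illustration, not the mac80211 code:

/* Hypothetical sketch: only data frames are subject to the
 * port-authorization check.
 */
#include <linux/ieee80211.h>

static bool my_should_drop_unauthorized(const struct ieee80211_hdr *hdr,
					bool sta_authorized)
{
	if (!ieee80211_is_data(hdr->frame_control))
		return false;		/* mgmt/ctrl frames pass through */

	return !sta_authorized;
}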
net/sctp/ipv6.c

@@ -228,7 +228,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 {
 	struct sctp_association *asoc = t->asoc;
 	struct dst_entry *dst = NULL;
-	struct flowi6 *fl6 = &fl->u.ip6;
+	struct flowi _fl;
+	struct flowi6 *fl6 = &_fl.u.ip6;
 	struct sctp_bind_addr *bp;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
@@ -238,7 +239,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	enum sctp_scope scope;
 	__u8 matchlen = 0;
 
-	memset(fl6, 0, sizeof(struct flowi6));
+	memset(&_fl, 0, sizeof(_fl));
 	fl6->daddr = daddr->v6.sin6_addr;
 	fl6->fl6_dport = daddr->v6.sin6_port;
 	fl6->flowi6_proto = IPPROTO_SCTP;
@@ -276,8 +277,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	rcu_read_unlock();
 
 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
-	if (!asoc || saddr)
+	if (!asoc || saddr) {
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 		goto out;
+	}
 
 	bp = &asoc->base.bind_addr;
 	scope = sctp_scope(daddr);
@@ -300,6 +304,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		if ((laddr->a.sa.sa_family == AF_INET6) &&
 		    (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
 			rcu_read_unlock();
+			t->dst = dst;
+			memcpy(fl, &_fl, sizeof(_fl));
 			goto out;
 		}
 	}
@@ -338,6 +344,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			if (!IS_ERR_OR_NULL(dst))
 				dst_release(dst);
 			dst = bdst;
+			t->dst = dst;
+			memcpy(fl, &_fl, sizeof(_fl));
 			break;
 		}
@@ -351,6 +359,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			dst_release(dst);
 		dst = bdst;
 		matchlen = bmatchlen;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 	}
 	rcu_read_unlock();
@@ -359,14 +369,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		struct rt6_info *rt;
 
 		rt = (struct rt6_info *)dst;
-		t->dst = dst;
 		t->dst_cookie = rt6_get_cookie(rt);
 		pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
 			 &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
-			 &fl6->saddr);
+			 &fl->u.ip6.saddr);
 	} else {
 		t->dst = NULL;
-
 		pr_debug("no route\n");
 	}
 }
net/sctp/protocol.c

@@ -409,7 +409,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 {
 	struct sctp_association *asoc = t->asoc;
 	struct rtable *rt;
-	struct flowi4 *fl4 = &fl->u.ip4;
+	struct flowi _fl;
+	struct flowi4 *fl4 = &_fl.u.ip4;
 	struct sctp_bind_addr *bp;
 	struct sctp_sockaddr_entry *laddr;
 	struct dst_entry *dst = NULL;
@@ -419,7 +420,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	if (t->dscp & SCTP_DSCP_SET_MASK)
 		tos = t->dscp & SCTP_DSCP_VAL_MASK;
 
-	memset(fl4, 0x0, sizeof(struct flowi4));
+	memset(&_fl, 0x0, sizeof(_fl));
 	fl4->daddr = daddr->v4.sin_addr.s_addr;
 	fl4->fl4_dport = daddr->v4.sin_port;
 	fl4->flowi4_proto = IPPROTO_SCTP;
@@ -438,8 +439,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		 &fl4->saddr);
 
 	rt = ip_route_output_key(sock_net(sk), fl4);
-	if (!IS_ERR(rt))
+	if (!IS_ERR(rt)) {
 		dst = &rt->dst;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
+	}
 
 	/* If there is no association or if a source address is passed, no
 	 * more validation is required.
@@ -502,27 +506,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
 				     false);
 		if (!odev || odev->ifindex != fl4->flowi4_oif) {
-			if (!dst)
+			if (!dst) {
 				dst = &rt->dst;
-			else
+				t->dst = dst;
+				memcpy(fl, &_fl, sizeof(_fl));
+			} else {
 				dst_release(&rt->dst);
+			}
 			continue;
 		}
 
 		dst_release(dst);
 		dst = &rt->dst;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 		break;
 	}
 
 out_unlock:
 	rcu_read_unlock();
 out:
-	t->dst = dst;
-	if (dst)
+	if (dst) {
 		pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
-			 &fl4->daddr, &fl4->saddr);
-	else
+			 &fl->u.ip4.daddr, &fl->u.ip4.saddr);
+	} else {
+		t->dst = NULL;
 		pr_debug("no route\n");
+	}
 }
 
 /* For v4, the source address is cached in the route entry(dst). So no need
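My reading of the two sctp routing hunks (ipv6.c and protocol.c), not quoting the commit message: the lookup now builds the flow in a stack copy (_fl) and only copies it into the caller's fl, together with setting t->dst, at the points where a destination is actually accepted. A lookup that fails or whose route is discarded therefore no longer leaves the transport holding a flow that does not match its cached route. A small, hypothetical plain-C sketch of that commit-on-success pattern; none of the names below are the sctp API:

/* Hypothetical sketch: build candidate state in a local copy and publish
 * it only when the lookup succeeds.
 */
#include <string.h>
#include <stddef.h>

struct flow { unsigned int daddr, saddr; };
struct transport { struct flow fl; void *dst; };

/* stand-in for the real route lookup; returns NULL on failure */
static void *route_lookup(const struct flow *fl) { (void)fl; return NULL; }

static void get_dst(struct transport *t, unsigned int daddr)
{
	struct flow _fl;		/* scratch copy, never seen by the caller */
	void *dst;

	memset(&_fl, 0, sizeof(_fl));
	_fl.daddr = daddr;

	dst = route_lookup(&_fl);
	if (dst) {			/* commit only on success */
		t->dst = dst;
		memcpy(&t->fl, &_fl, sizeof(_fl));
	} else {
		t->dst = NULL;		/* caller's old flow left untouched */
	}
}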
net/sctp/socket.c

@@ -147,29 +147,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
 	skb_orphan(chunk->skb);
 }
 
+#define traverse_and_process()	\
+do {				\
+	msg = chunk->msg;	\
+	if (msg == prev_msg)	\
+		continue;	\
+	list_for_each_entry(c, &msg->chunks, frag_list) {	\
+		if ((clear && asoc->base.sk == c->skb->sk) ||	\
+		    (!clear && asoc->base.sk != c->skb->sk))	\
+			cb(c);	\
+	}			\
+	prev_msg = msg;		\
+} while (0)
+
 static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+				       bool clear,
 				       void (*cb)(struct sctp_chunk *))
 {
+	struct sctp_datamsg *msg, *prev_msg = NULL;
 	struct sctp_outq *q = &asoc->outqueue;
+	struct sctp_chunk *chunk, *c;
 	struct sctp_transport *t;
-	struct sctp_chunk *chunk;
 
 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
 		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
-			cb(chunk);
+			traverse_and_process();
 
 	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->sacked, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->out_chunk_list, list)
-		cb(chunk);
+		traverse_and_process();
 }
 
 static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
@@ -9574,9 +9589,9 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 * paths won't try to lock it and then oldsk.
 	 */
 	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
-	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+	sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
 	sctp_assoc_migrate(assoc, newsk);
-	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+	sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.
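As I read the socket.c hunk, traverse_and_process() makes the tx walk operate per data message: for each chunk it jumps to the owning message, applies the callback to that message's chunks filtered by the new clear flag (whether the skb is still owned by the association's socket), and remembers the message so consecutive chunks from the same message are not processed twice. The core trick is the prev_msg dedup; a tiny, hypothetical plain-C sketch of just that step, using stand-in types rather than the sctp ones:

/* Hypothetical sketch: skip consecutive chunks that share the same
 * parent message so per-message work runs only once.
 */
#include <stdio.h>

struct msg   { int id; };
struct chunk { struct msg *msg; };

static void for_each_chunk_msg(struct chunk *chunks, int n,
			       void (*cb)(struct msg *))
{
	struct msg *prev_msg = NULL;
	int i;

	for (i = 0; i < n; i++) {
		struct msg *m = chunks[i].msg;

		if (m == prev_msg)	/* same message as the previous chunk */
			continue;
		cb(m);
		prev_msg = m;
	}
}

static void show(struct msg *m) { printf("msg %d\n", m->id); }

int main(void)
{
	struct msg a = { 1 }, b = { 2 };
	struct chunk chunks[] = { { &a }, { &a }, { &b }, { &b } };

	for_each_chunk_msg(chunks, 4, show);	/* prints msg 1 then msg 2 */
	return 0;
}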