Commit 8d6e79d3 authored by Jon Maloy, committed by David S. Miller

tipc: improve link resiliency when rps is activated

Currently, the TIPC RPS dissector is based only on the incoming packets'
source node address, hence steering all traffic from a node to the same
core. We have seen that this makes the links vulnerable to starvation
and unnecessary resets when we turn down the link tolerance to very low
values.

To reduce the risk of this happening, we exempt probe and probe reply
packets from the convergence to one core per source node. Instead, we do
the opposite: we try to spread those packets across as many cores as
possible, by randomizing the flow selector key.

To make such packets identifiable to the dissector, we add a new
'is_keepalive' bit to word 0 of the LINK_PROTOCOL header. This bit is
set both for PROBE and PROBE_REPLY messages, and only for those.

It should be noted that these packets are not part of any flow anyway,
and only constitute a minuscule fraction of all packets sent across a
link. Hence, there is no risk that this will affect overall performance.
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 141f575f
include/net/flow_dissector.h

@@ -84,11 +84,11 @@ struct flow_dissector_key_ipv6_addrs {
 };

 /**
- * struct flow_dissector_key_tipc_addrs:
- * @srcnode: source node address
+ * struct flow_dissector_key_tipc:
+ * @key: source node address combined with selector
  */
-struct flow_dissector_key_tipc_addrs {
-	__be32 srcnode;
+struct flow_dissector_key_tipc {
+	__be32 key;
 };

 /**
@@ -100,7 +100,7 @@ struct flow_dissector_key_addrs {
 	union {
 		struct flow_dissector_key_ipv4_addrs v4addrs;
 		struct flow_dissector_key_ipv6_addrs v6addrs;
-		struct flow_dissector_key_tipc_addrs tipcaddrs;
+		struct flow_dissector_key_tipc tipckey;
 	};
 };
@@ -192,7 +192,7 @@ enum flow_dissector_key_id {
 	FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
 	FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */
 	FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
-	FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
+	FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
 	FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */
 	FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
 	FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
...
include/net/tipc.h (new file)

/*
* include/net/tipc.h: Include file for TIPC message header routines
*
* Copyright (c) 2017 Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _TIPC_HDR_H
#define _TIPC_HDR_H

#include <linux/random.h>

#define KEEPALIVE_MSG_MASK 0x0e080000 /* LINK_PROTOCOL + MSG_IS_KEEPALIVE */

struct tipc_basic_hdr {
	__be32 w[4];
};

static inline u32 tipc_hdr_rps_key(struct tipc_basic_hdr *hdr)
{
	u32 w0 = ntohl(hdr->w[0]);
	bool keepalive_msg = (w0 & KEEPALIVE_MSG_MASK) == KEEPALIVE_MSG_MASK;
	int key;

	/* Return source node identity as key */
	if (likely(!keepalive_msg))
		return hdr->w[3];

	/* Spread PROBE/PROBE_REPLY messages across the cores */
	get_random_bytes(&key, sizeof(key));
	return key;
}

#endif
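As a side note (illustrative, not part of the patch): the raw-word test above
is shorthand for two header fields. Assuming the usual TIPC word-0 layout with
the message user in bits 25-28 and LINK_PROTOCOL as user value 7, plus the new
is_keepalive bit 19, the mask decomposes as:

/* Assumed decomposition of KEEPALIVE_MSG_MASK; see net/tipc/msg.h for the
 * authoritative accessors (msg_user(), msg_is_keepalive()).
 */
#define EXAMPLE_LINK_PROTO_BITS	(7u << 25)	/* user field = LINK_PROTOCOL (assumed value 7) */
#define EXAMPLE_KEEPALIVE_BIT	(1u << 19)	/* is_keepalive bit added by this patch */

/* (7u << 25) | (1u << 19) == 0x0e000000 | 0x00080000 == 0x0e080000 */

So a packet only takes the randomized path when it is a LINK_PROTOCOL message
with the keepalive bit set, i.e. a PROBE or PROBE_REPLY.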
net/core/flow_dissector.c

@@ -10,6 +10,7 @@
 #include <net/ipv6.h>
 #include <net/gre.h>
 #include <net/pptp.h>
+#include <net/tipc.h>
 #include <linux/igmp.h>
 #include <linux/icmp.h>
 #include <linux/sctp.h>
@@ -772,23 +773,22 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 		break;
 	}
 	case htons(ETH_P_TIPC): {
-		struct {
-			__be32 pre[3];
-			__be32 srcnode;
-		} *hdr, _hdr;
-		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
+		struct tipc_basic_hdr *hdr, _hdr;
+
+		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
+					   data, hlen, &_hdr);
 		if (!hdr) {
 			fdret = FLOW_DISSECT_RET_OUT_BAD;
 			break;
 		}

 		if (dissector_uses_key(flow_dissector,
-				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
+				       FLOW_DISSECTOR_KEY_TIPC)) {
 			key_addrs = skb_flow_dissector_target(flow_dissector,
-							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
+							      FLOW_DISSECTOR_KEY_TIPC,
 							      target_container);
-			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
-			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
+			key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
+			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
 		}
 		fdret = FLOW_DISSECT_RET_OUT_GOOD;
 		break;
@@ -1024,8 +1024,8 @@ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
 	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
 		diff -= sizeof(flow->addrs.v6addrs);
 		break;
-	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
-		diff -= sizeof(flow->addrs.tipcaddrs);
+	case FLOW_DISSECTOR_KEY_TIPC:
+		diff -= sizeof(flow->addrs.tipckey);
 		break;
 	}
 	return (sizeof(*flow) - diff) / sizeof(u32);
@@ -1039,8 +1039,8 @@ __be32 flow_get_u32_src(const struct flow_keys *flow)
 	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
 		return (__force __be32)ipv6_addr_hash(
 			&flow->addrs.v6addrs.src);
-	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
-		return flow->addrs.tipcaddrs.srcnode;
+	case FLOW_DISSECTOR_KEY_TIPC:
+		return flow->addrs.tipckey.key;
 	default:
 		return 0;
 	}
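For context (a hedged sketch, not part of the patch): an in-kernel caller
could retrieve the dissected TIPC key through the generic flow-keys helpers
touched above; the helper name example_tipc_flow_key is hypothetical.

#include <linux/skbuff.h>
#include <net/flow_dissector.h>

/* Hypothetical helper, for illustration only */
static u32 example_tipc_flow_key(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;

	/* For ETH_P_TIPC this is addrs.tipckey.key: the source node address
	 * for ordinary traffic, a random value for keepalive probes.
	 */
	return (__force u32)flow_get_u32_src(&keys);
}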
@@ -1321,8 +1321,8 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
 		.offset = offsetof(struct flow_keys, addrs.v6addrs),
 	},
 	{
-		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
-		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
+		.key_id = FLOW_DISSECTOR_KEY_TIPC,
+		.offset = offsetof(struct flow_keys, addrs.tipckey),
 	},
 	{
 		.key_id = FLOW_DISSECTOR_KEY_PORTS,
...
net/tipc/link.c

@@ -239,7 +239,8 @@ static int link_is_up(struct tipc_link *l)
 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
-				      u16 rcvgap, int tolerance, int priority,
+				      bool probe_reply, u16 rcvgap,
+				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
 static void link_print(struct tipc_link *l, const char *str);
 static int tipc_link_build_nack_msg(struct tipc_link *l,
@@ -773,7 +774,7 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 	}
 	if (state || probe || setup)
-		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
+		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
 	return rc;
 }
@@ -1174,7 +1175,7 @@ int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
 	/* Unicast ACK */
 	l->rcv_unacked = 0;
 	l->stats.sent_acks++;
-	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
 	return 0;
 }
@@ -1188,7 +1189,7 @@ void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
 	if (l->state == LINK_ESTABLISHING)
 		mtyp = ACTIVATE_MSG;
-	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
+	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

 	/* Inform peer that this endpoint is going down if applicable */
 	skb = skb_peek_tail(xmitq);
@@ -1215,7 +1216,7 @@ static int tipc_link_build_nack_msg(struct tipc_link *l,
 	}
 	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
-		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
 	return 0;
 }
@@ -1289,7 +1290,8 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 }

 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
-				      u16 rcvgap, int tolerance, int priority,
+				      bool probe_reply, u16 rcvgap,
+				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
 {
 	struct tipc_link *bcl = l->bc_rcvlink;
@@ -1337,6 +1339,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 		msg_set_seq_gap(hdr, rcvgap);
 		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
 		msg_set_probe(hdr, probe);
+		msg_set_is_keepalive(hdr, probe || probe_reply);
 		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
 		msg_set_size(hdr, INT_H_SIZE + dlen);
 		skb_trim(skb, INT_H_SIZE + dlen);
@@ -1442,6 +1445,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 	u16 rcv_nxt = l->rcv_nxt;
 	u16 dlen = msg_data_sz(hdr);
 	int mtyp = msg_type(hdr);
+	bool reply = msg_probe(hdr);
 	void *data;
 	char *if_name;
 	int rc = 0;
@@ -1528,9 +1532,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
 		/* Send NACK if peer has sent pkts we haven't received yet */
 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
 			rcvgap = peers_snd_nxt - l->rcv_nxt;
-		if (rcvgap || (msg_probe(hdr)))
-			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
-						  0, 0, xmitq);
+		if (rcvgap || reply)
+			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
+						  rcvgap, 0, 0, xmitq);

 		tipc_link_release_pkts(l, ack);

 		/* If NACK, retransmit will now start at right position */
@@ -2122,14 +2126,14 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
 {
 	l->tolerance = tol;
-	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
+	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
 }

 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
 {
 	l->priority = prio;
-	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
+	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
 }

 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
...
net/tipc/msg.h

@@ -226,6 +226,16 @@ static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
 	msg_set_bits(m, 0, 19, 1, d);
 }

+static inline int msg_is_keepalive(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 19, 1);
+}
+
+static inline void msg_set_is_keepalive(struct tipc_msg *m, u32 d)
+{
+	msg_set_bits(m, 0, 19, 1, d);
+}
+
 static inline int msg_src_droppable(struct tipc_msg *m)
 {
 	return msg_bits(m, 0, 18, 1);
...
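Hedged illustration (not in the patch): the new accessors reuse bit 19 of
word 0, which carries dest_droppable for payload messages, so a consumer would
qualify the test with the message user; the wrapper name below is hypothetical.

/* Hypothetical wrapper; mirrors the KEEPALIVE_MSG_MASK test in
 * include/net/tipc.h (LINK_PROTOCOL user + is_keepalive bit).
 */
static inline bool msg_is_link_keepalive(struct tipc_msg *m)
{
	return msg_user(m) == LINK_PROTOCOL && msg_is_keepalive(m);
}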