Commit 6c647935 authored by David S. Miller

Merge branch 'hv_netvsc-Ethtool-handler-to-change-UDP-hash-levels'

Haiyang Zhang says:

====================
hv_netvsc: Ethtool handler to change UDP hash levels

The patch set adds the functions to switch UDP hash level between
L3 and L4 by ethtool command. UDP over IPv4 and v6 can be set
differently. The default hash level is L4. We currently only
allow switching TX hash level from within the guests.

The ethtool callback function is triggered from the command line, and
updates the per-device variables of the hash level.

On Azure, fragmented UDP packets are not yet supported with L4
hashing, and may have a high packet loss rate. Using L3 hashing is
recommended in this case. This ethtool option allows a user to
make this selection.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0580b53f 3b0c3458
@@ -21,11 +21,23 @@ Features
 --------------------
 Hyper-V supports receive side scaling. For TCP, packets are
 distributed among available queues based on IP address and port
-number. Current versions of Hyper-V host, only distribute UDP
-packets based on the IP source and destination address.
-The port number is not used as part of the hash value for UDP.
-Fragmented IP packets are not distributed between queues;
-all fragmented packets arrive on the first channel.
+number.
+
+For UDP, we can switch UDP hash level between L3 and L4 by ethtool
+command. UDP over IPv4 and v6 can be set differently. The default
+hash level is L4. We currently only allow switching TX hash level
+from within the guests.
+
+On Azure, fragmented UDP packets have high loss rate with L4
+hashing. Using L3 hashing is recommended in this case.
+
+For example, for UDP over IPv4 on eth0:
+To include UDP port numbers in hashing:
+        ethtool -N eth0 rx-flow-hash udp4 sdfn
+To exclude UDP port numbers in hashing:
+        ethtool -N eth0 rx-flow-hash udp4 sd
+To show UDP hash level:
+        ethtool -n eth0 rx-flow-hash udp4
 
 Generic Receive Offload, aka GRO
 --------------------------------
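The ethtool commands documented above use the kernel's standard rx-flow-hash interface: ethtool -n and -N issue ETHTOOL_GRXFH and ETHTOOL_SRXFH requests through the SIOCETHTOOL ioctl, which the driver changes below service in netvsc_get_rxnfc() and netvsc_set_rxnfc(). As a minimal user-space sketch (not part of this patch; the interface name eth0 follows the documentation example and the program is purely illustrative), the following queries whether UDP-over-IPv4 hashing currently includes the L4 port numbers:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        /* ETHTOOL_GRXFH returns the rx-flow-hash fields for one flow type. */
        struct ethtool_rxnfc nfc = {
                .cmd = ETHTOOL_GRXFH,
                .flow_type = UDP_V4_FLOW,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* interface name is illustrative */
        ifr.ifr_data = (char *)&nfc;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GRXFH");
                close(fd);
                return 1;
        }

        /* L4 hashing is in effect when the UDP port bytes are part of the hash. */
        printf("udp4 hash level: %s\n",
               (nfc.data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) ? "L4" : "L3");
        close(fd);
        return 0;
}

The same query with flow_type = UDP_V6_FLOW reports the IPv6 setting, since the two hash levels are tracked separately in the driver.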
@@ -720,6 +720,8 @@ struct net_device_context {
         u32 tx_send_table[VRSS_SEND_TAB_SIZE];
 
         /* Ethtool settings */
+        bool udp4_l4_hash;
+        bool udp6_l4_hash;
         u8 duplex;
         u32 speed;
         struct netvsc_ethtool_stats eth_stats;
@@ -190,10 +190,12 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
         return ppi;
 }
 
-/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute
- * hash for non-TCP traffic with only IP numbers.
+/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
+ * packets. We can use ethtool to change UDP hash level when necessary.
  */
-static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk)
+static inline u32 netvsc_get_hash(
+        struct sk_buff *skb,
+        const struct net_device_context *ndc)
 {
         struct flow_keys flow;
         u32 hash;
@@ -204,7 +206,11 @@ static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk)
         if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
                 return 0;
 
-        if (flow.basic.ip_proto == IPPROTO_TCP) {
+        if (flow.basic.ip_proto == IPPROTO_TCP ||
+            (flow.basic.ip_proto == IPPROTO_UDP &&
+             ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) ||
+              (flow.basic.n_proto == htons(ETH_P_IPV6) &&
+               ndc->udp6_l4_hash)))) {
                 return skb_get_hash(skb);
         } else {
                 if (flow.basic.n_proto == htons(ETH_P_IP))
@@ -227,7 +233,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev,
         struct sock *sk = skb->sk;
         int q_idx;
 
-        q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) &
+        q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) &
                                    (VRSS_SEND_TAB_SIZE - 1)];
 
         /* If queue index changed record the new value */
@@ -891,6 +897,9 @@ static void netvsc_init_settings(struct net_device *dev)
 {
         struct net_device_context *ndc = netdev_priv(dev);
 
+        ndc->udp4_l4_hash = true;
+        ndc->udp6_l4_hash = true;
+
         ndc->speed = SPEED_UNKNOWN;
         ndc->duplex = DUPLEX_FULL;
 }
@@ -1228,7 +1237,7 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 }
 
 static int
-netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
+netvsc_get_rss_hash_opts(struct net_device_context *ndc,
                          struct ethtool_rxnfc *info)
 {
         info->data = RXH_IP_SRC | RXH_IP_DST;
@@ -1237,9 +1246,20 @@ netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
         case TCP_V4_FLOW:
         case TCP_V6_FLOW:
                 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-                /* fallthrough */
+                break;
+
         case UDP_V4_FLOW:
+                if (ndc->udp4_l4_hash)
+                        info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+
+                break;
+
         case UDP_V6_FLOW:
+                if (ndc->udp6_l4_hash)
+                        info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+
+                break;
+
         case IPV4_FLOW:
         case IPV6_FLOW:
                 break;
@@ -1267,8 +1287,48 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                 return 0;
 
         case ETHTOOL_GRXFH:
-                return netvsc_get_rss_hash_opts(nvdev, info);
+                return netvsc_get_rss_hash_opts(ndc, info);
         }
 
         return -EOPNOTSUPP;
 }
+
+static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
+                                    struct ethtool_rxnfc *info)
+{
+        if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+                           RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                if (info->flow_type == UDP_V4_FLOW)
+                        ndc->udp4_l4_hash = true;
+                else if (info->flow_type == UDP_V6_FLOW)
+                        ndc->udp6_l4_hash = true;
+                else
+                        return -EOPNOTSUPP;
+
+                return 0;
+        }
+
+        if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+                if (info->flow_type == UDP_V4_FLOW)
+                        ndc->udp4_l4_hash = false;
+                else if (info->flow_type == UDP_V6_FLOW)
+                        ndc->udp6_l4_hash = false;
+                else
+                        return -EOPNOTSUPP;
+
+                return 0;
+        }
+
+        return -EOPNOTSUPP;
+}
+
+static int
+netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
+{
+        struct net_device_context *ndc = netdev_priv(ndev);
+
+        if (info->cmd == ETHTOOL_SRXFH)
+                return netvsc_set_rss_hash_opts(ndc, info);
+
+        return -EOPNOTSUPP;
+}
@@ -1470,6 +1530,7 @@ static const struct ethtool_ops ethtool_ops = {
         .set_channels = netvsc_set_channels,
         .get_ts_info = ethtool_op_get_ts_info,
         .get_rxnfc = netvsc_get_rxnfc,
+        .set_rxnfc = netvsc_set_rxnfc,
         .get_rxfh_key_size = netvsc_get_rxfh_key_size,
         .get_rxfh_indir_size = netvsc_rss_indir_size,
         .get_rxfh = netvsc_get_rxfh,
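The corresponding set path, again as a hedged sketch rather than part of the patch: an ETHTOOL_SRXFH request carrying only RXH_IP_SRC | RXH_IP_DST for UDP_V4_FLOW is what ethtool -N eth0 rx-flow-hash udp4 sd sends, and it lands in the new netvsc_set_rss_hash_opts() above, which clears udp4_l4_hash so that netvsc_get_hash() computes an L3 (address-only) hash for UDP over IPv4 from then on.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        /* Request an L3-only hash (addresses, no UDP ports) for UDP over IPv4.
         * Any other field combination, or a non-UDP flow type, makes the new
         * netvsc_set_rss_hash_opts() return -EOPNOTSUPP.
         */
        struct ethtool_rxnfc nfc = {
                .cmd = ETHTOOL_SRXFH,
                .flow_type = UDP_V4_FLOW,
                .data = RXH_IP_SRC | RXH_IP_DST,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* interface name is illustrative */
        ifr.ifr_data = (char *)&nfc;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_SRXFH");

        close(fd);
        return 0;
}

Passing the full sdfn set (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) switches the device back to L4 hashing; UDP over IPv6 is controlled the same way through UDP_V6_FLOW.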