Commit e01ec219 authored by K. Y. Srinivasan, committed by David S. Miller

hv_netvsc: Properly size the vrss queues

The current algorithm for deciding on the number of VRSS channels is
not optimal: we open the minimum of the number of online CPUs and the
number of VRSS channels the host is offering, so on a 32-VCPU guest we
could potentially open 32 VRSS subchannels. Experimentation has shown
that it is best to limit the number of VRSS channels to the number of
CPUs within a NUMA node.

Here is the new algorithm for deciding on the number of sub-channels we
would open up (a simplified sketch follows the list):
        1) Pick the minimum of what the host is offering and what the driver
           in the guest is specifying as the default value.
        2) Pick the minimum of (1) and the number of CPUs in the NUMA
           node the primary channel is bound to.
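
For illustration only, a minimal userspace C sketch of the same two-step
selection is shown below; the input values (host offer, driver default,
CPUs per NUMA node) are hypothetical stand-ins for rsscap.num_recv_que,
max_num_vrss_chns and the weight of the node's CPU mask:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int host_recv_queues = 16; /* offered by the host */
        unsigned int driver_default = 8;    /* guest driver default */
        unsigned int cpus_in_node = 12;     /* CPUs in the primary channel's node */

        /* Step 1: minimum of the host offer and the driver default. */
        unsigned int num_rss_qs = min_u32(driver_default, host_recv_queues);

        /* Step 2: further limit to the CPUs in the primary channel's NUMA node. */
        unsigned int num_chn = min_u32(cpus_in_node, num_rss_qs);

        printf("sub-channels to open: %u\n", num_chn); /* prints 8 with these inputs */
        return 0;
}

In the driver itself the same computation is done with the kernel's min()
macro and the cpumask helpers, as the diff below shows.
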
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8133534c
@@ -161,6 +161,7 @@ struct netvsc_device_info {
         unsigned char mac_adr[ETH_ALEN];
         bool link_state; /* 0 - link up, 1 - link down */
         int ring_size;
+        u32 max_num_vrss_chns;
 };
 
 enum rndis_device_state {
@@ -46,6 +46,8 @@ static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
+static int max_num_vrss_chns = 8;
+
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -755,6 +757,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
         ndevctx->device_ctx = hdev;
         hv_set_drvdata(hdev, ndev);
         device_info.ring_size = ring_size;
+        device_info.max_num_vrss_chns = max_num_vrss_chns;
         rndis_filter_device_add(hdev, &device_info);
         netif_tx_wake_all_queues(ndev);
@@ -975,6 +978,7 @@ static int netvsc_probe(struct hv_device *dev,
         /* Notify the netvsc driver of the new device */
         device_info.ring_size = ring_size;
+        device_info.max_num_vrss_chns = max_num_vrss_chns;
         ret = rndis_filter_device_add(dev, &device_info);
         if (ret != 0) {
                 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
@@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev,
         struct ndis_recv_scale_cap rsscap;
         u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
         u32 mtu, size;
+        u32 num_rss_qs;
+        const struct cpumask *node_cpu_mask;
+        u32 num_possible_rss_qs;
 
         rndis_device = get_rndis_device();
         if (!rndis_device)
@@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev,
         if (ret || rsscap.num_recv_que < 2)
                 goto out;
 
+        num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+
         net_device->max_chn = rsscap.num_recv_que;
-        net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
-                               num_online_cpus() : rsscap.num_recv_que;
+
+        /*
+         * We will limit the VRSS channels to the number CPUs in the NUMA node
+         * the primary channel is currently bound to.
+         */
+        node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
+        num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+        net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
         if (net_device->num_chn == 1)
                 goto out;
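
To make the effect concrete with hypothetical numbers: on a 32-VCPU guest
whose NUMA node holds 16 CPUs and whose host offers 16 receive queues, the
new code computes num_rss_qs = min(8, 16) = 8 and num_chn = min(16, 8) = 8
sub-channels, whereas the old num_online_cpus()-based logic would have
opened min(32, 16) = 16.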