Commit 5f08cd3d authored by Shailend Chand, committed by Jakub Kicinski

gve: Alloc before freeing when adjusting queues

Previously, existing queues were freed before the resources for the
new queues were allocated, so attempting to change queue counts under
a resource crunch would take down the interface.
Signed-off-by: Shailend Chand <shailend@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Jeroen de Borst <jeroendb@google.com>
Link: https://lore.kernel.org/r/20240122182632.1102721-6-shailend@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 92a6d7a4
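
The commit message describes an alloc-before-free ordering: reserve the new
queue resources first, and only tear down the old ones once the allocation
has succeeded. A minimal sketch of that pattern in plain C follows; queue_set,
dev_ctx, and the helper functions are hypothetical stand-ins, not the gve
driver's actual API.

/* Sketch of the alloc-before-free pattern this patch adopts.
 * All names here are illustrative stand-ins, not gve functions.
 */
#include <errno.h>
#include <stdlib.h>

struct queue_set {
	int num_queues;
	/* ring memory, descriptors, ... */
};

struct dev_ctx {
	struct queue_set *queues;
};

static struct queue_set *alloc_queue_set(int num_queues)
{
	struct queue_set *qs = calloc(1, sizeof(*qs));

	if (qs)
		qs->num_queues = num_queues;
	return qs;
}

static void free_queue_set(struct queue_set *qs)
{
	free(qs);
}

static int resize_queues(struct dev_ctx *ctx, int new_count)
{
	/* Allocate the new resources first: if this fails, the old
	 * queues are untouched and the interface stays up.
	 */
	struct queue_set *fresh = alloc_queue_set(new_count);

	if (!fresh)
		return -ENOMEM;

	/* Only after the allocation succeeds do we tear down the old
	 * queues and install the new ones.
	 */
	free_queue_set(ctx->queues);
	ctx->queues = fresh;
	return 0;
}

With this ordering, an allocation failure leaves the running queues intact,
which is exactly the failure mode the patch fixes.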
@@ -1869,42 +1869,87 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }
 
+static int gve_adjust_config(struct gve_priv *priv,
+			     struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	int err;
+
+	/* Allocate resources for the new configuration */
+	err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
+				   tx_alloc_cfg, rx_alloc_cfg);
+	if (err) {
+		netif_err(priv, drv, priv->dev,
+			  "Adjust config failed to alloc new queues");
+		return err;
+	}
+
+	/* Teardown the device and free existing resources */
+	err = gve_close(priv->dev);
+	if (err) {
+		netif_err(priv, drv, priv->dev,
+			  "Adjust config failed to close old queues");
+		gve_queues_mem_free(priv, qpls_alloc_cfg,
+				    tx_alloc_cfg, rx_alloc_cfg);
+		return err;
+	}
+
+	/* Bring the device back up again with the new resources. */
+	err = gve_queues_start(priv, qpls_alloc_cfg,
+			       tx_alloc_cfg, rx_alloc_cfg);
+	if (err) {
+		netif_err(priv, drv, priv->dev,
+			  "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
+		/* No need to free on error: ownership of resources is lost after
+		 * calling gve_queues_start.
+		 */
+		gve_turndown(priv);
+		return err;
+	}
+
+	return 0;
+}
+
 int gve_adjust_queues(struct gve_priv *priv,
 		      struct gve_queue_config new_rx_config,
 		      struct gve_queue_config new_tx_config)
 {
+	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+	struct gve_qpl_config new_qpl_cfg;
 	int err;
 
-	if (netif_carrier_ok(priv->dev)) {
-		/* To make this process as simple as possible we teardown the
-		 * device, set the new configuration, and then bring the device
-		 * up again.
-		 */
-		err = gve_close(priv->dev);
-		/* we have already tried to reset in close,
-		 * just fail at this point
-		 */
-		if (err)
-			return err;
-		priv->tx_cfg = new_tx_config;
-		priv->rx_cfg = new_rx_config;
+	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+				&tx_alloc_cfg, &rx_alloc_cfg);
+
+	/* qpl_cfg is not read-only, it contains a map that gets updated as
+	 * rings are allocated, which is why we cannot use the yet unreleased
+	 * one in priv.
+	 */
+	qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+	tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+	rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
+	/* Relay the new config from ethtool */
+	qpls_alloc_cfg.tx_cfg = &new_tx_config;
+	tx_alloc_cfg.qcfg = &new_tx_config;
+	rx_alloc_cfg.qcfg_tx = &new_tx_config;
+	qpls_alloc_cfg.rx_cfg = &new_rx_config;
+	rx_alloc_cfg.qcfg = &new_rx_config;
+	tx_alloc_cfg.num_rings = new_tx_config.num_queues;
 
-		err = gve_open(priv->dev);
-		if (err)
-			goto err;
-
-		return 0;
+	if (netif_carrier_ok(priv->dev)) {
+		err = gve_adjust_config(priv, &qpls_alloc_cfg,
+					&tx_alloc_cfg, &rx_alloc_cfg);
+		return err;
 	}
 	/* Set the config for the next up. */
 	priv->tx_cfg = new_tx_config;
 	priv->rx_cfg = new_rx_config;
 
 	return 0;
-
-err:
-	netif_err(priv, drv, priv->dev,
-		  "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
-	gve_turndown(priv);
-	return err;
 }
 
 static void gve_turndown(struct gve_priv *priv)
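
The in-diff comment "No need to free on error: ownership of resources is lost
after calling gve_queues_start" describes a consume-on-call contract: once the
start routine takes the resource configs, it is responsible for cleaning them
up on its own failure path, so the caller must not double-free. A hedged
sketch of such a contract; hw_install, start_queues, and the structs are
illustrative stand-ins, not gve functions.

#include <errno.h>
#include <stdlib.h>

struct queue_set { int num_queues; };
struct dev_ctx  { struct queue_set *queues; };

/* Pretend hardware programming step that can fail. */
static int hw_install(struct dev_ctx *ctx, struct queue_set *qs)
{
	(void)ctx;
	return qs->num_queues > 0 ? 0 : -EIO;
}

/* A "consuming" start: on failure it frees the resources itself,
 * so the caller must not free them again (it only logs the error
 * and turns the device down).
 */
static int start_queues(struct dev_ctx *ctx, struct queue_set *fresh)
{
	int err = hw_install(ctx, fresh);

	if (err) {
		free(fresh);		/* callee owns cleanup on failure */
		return err;
	}
	ctx->queues = fresh;		/* success: device now owns the set */
	return 0;
}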