Commit 92a6d7a4 authored by Shailend Chand, committed by Jakub Kicinski

gve: Refactor gve_open and gve_close

gve_open is rewritten to be composed of two functions: gve_queues_mem_alloc
and gve_queues_start. The former only allocates queue resources without
installing the queues; making the queues live is taken up by the latter.
Similarly, gve_close is split into gve_queues_stop and
gve_queues_mem_free.

Separating the allocation of queue resources from making the queues live
helps with subsequent changes that aim to avoid taking down the datapath
when applying new configurations.
Signed-off-by: Shailend Chand <shailend@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Jeroen de Borst <jeroendb@google.com>
Link: https://lore.kernel.org/r/20240122182632.1102721-5-shailend@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent f13697cc
@@ -1350,45 +1350,99 @@ static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
 	}
 }
 
-static int gve_open(struct net_device *dev)
+static void gve_queues_mem_free(struct gve_priv *priv,
+				struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+	gve_free_qpls(priv, qpls_alloc_cfg);
+}
+
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+				struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	int err;
+
+	err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+	if (err) {
+		netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
+		return err;
+	}
+	tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+	rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+	err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+	if (err) {
+		netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
+		goto free_qpls;
+	}
+
+	return 0;
+
+free_qpls:
+	gve_free_qpls(priv, qpls_alloc_cfg);
+	return err;
+}
+
+static void gve_queues_mem_remove(struct gve_priv *priv)
 {
 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
-	struct gve_priv *priv = netdev_priv(dev);
+
+	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+				&tx_alloc_cfg, &rx_alloc_cfg);
+	gve_queues_mem_free(priv, &qpls_alloc_cfg,
+			    &tx_alloc_cfg, &rx_alloc_cfg);
+	priv->qpls = NULL;
+	priv->tx = NULL;
+	priv->rx = NULL;
+}
+
+/* The passed-in queue memory is stored into priv and the queues are made live.
+ * No memory is allocated. Passed-in memory is freed on errors.
+ */
+static int gve_queues_start(struct gve_priv *priv,
+			    struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+			    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+			    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	struct net_device *dev = priv->dev;
 	int err;
 
+	/* Record new resources into priv */
+	priv->qpls = qpls_alloc_cfg->qpls;
+	priv->tx = tx_alloc_cfg->tx;
+	priv->rx = rx_alloc_cfg->rx;
+
+	/* Record new configs into priv */
+	priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
+	priv->tx_cfg = *tx_alloc_cfg->qcfg;
+	priv->rx_cfg = *rx_alloc_cfg->qcfg;
+	priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
+	priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+
 	if (priv->xdp_prog)
 		priv->num_xdp_queues = priv->rx_cfg.num_queues;
 	else
 		priv->num_xdp_queues = 0;
 
-	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
-				&tx_alloc_cfg, &rx_alloc_cfg);
-	err = gve_alloc_qpls(priv, &qpls_alloc_cfg);
-	if (err)
-		return err;
-	priv->qpls = qpls_alloc_cfg.qpls;
-	tx_alloc_cfg.qpls = priv->qpls;
-	rx_alloc_cfg.qpls = priv->qpls;
-	err = gve_alloc_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-	if (err)
-		goto free_qpls;
-
-	gve_tx_start_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_start_rings(priv, rx_alloc_cfg.qcfg->num_queues);
+	gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
+	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
 	gve_init_sync_stats(priv);
 
 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 
 	err = gve_reg_xdp_info(priv, dev);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 
 	err = gve_register_qpls(priv);
 	if (err)
@@ -1416,29 +1470,22 @@ static int gve_open(struct net_device *dev)
 	priv->interface_up_cnt++;
 	return 0;
 
-free_rings:
-	gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
-	gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-free_qpls:
-	gve_free_qpls(priv, &qpls_alloc_cfg);
-	return err;
-
 reset:
-	/* This must have been called from a reset due to the rtnl lock
-	 * so just return at this point.
-	 */
 	if (gve_get_reset_in_progress(priv))
-		return err;
-	/* Otherwise reset before returning */
+		goto stop_and_free_rings;
 	gve_reset_and_teardown(priv, true);
 	/* if this fails there is nothing we can do so just ignore the return */
 	gve_reset_recovery(priv, false);
 	/* return the original error */
 	return err;
+
+stop_and_free_rings:
+	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+	gve_queues_mem_remove(priv);
+	return err;
 }
 
-static int gve_close(struct net_device *dev)
+static int gve_open(struct net_device *dev)
 {
 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
@@ -1446,7 +1493,30 @@ static int gve_close(struct net_device *dev)
 	struct gve_priv *priv = netdev_priv(dev);
 	int err;
 
-	netif_carrier_off(dev);
+	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+				&tx_alloc_cfg, &rx_alloc_cfg);
+
+	err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
+				   &tx_alloc_cfg, &rx_alloc_cfg);
+	if (err)
+		return err;
+
+	/* No need to free on error: ownership of resources is lost after
+	 * calling gve_queues_start.
+	 */
+	err = gve_queues_start(priv, &qpls_alloc_cfg,
+			       &tx_alloc_cfg, &rx_alloc_cfg);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int gve_queues_stop(struct gve_priv *priv)
+{
+	int err;
+
+	netif_carrier_off(priv->dev);
 	if (gve_get_device_rings_ok(priv)) {
 		gve_turndown(priv);
 		gve_drain_page_cache(priv);
@@ -1462,12 +1532,8 @@ static int gve_close(struct net_device *dev)
 
 	gve_unreg_xdp_info(priv);
 
-	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
-				&tx_alloc_cfg, &rx_alloc_cfg);
-	gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
-	gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-	gve_free_qpls(priv, &qpls_alloc_cfg);
+	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
 
 	priv->interface_down_cnt++;
 	return 0;
@@ -1483,6 +1549,19 @@ static int gve_close(struct net_device *dev)
 	return gve_reset_recovery(priv, false);
 }
 
+static int gve_close(struct net_device *dev)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = gve_queues_stop(priv);
+	if (err)
+		return err;
+
+	gve_queues_mem_remove(priv);
+	return 0;
+}
+
 static int gve_remove_xdp_queues(struct gve_priv *priv)
 {
 	int qpl_start_id;
...
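To illustrate the design choice the commit message describes (allocate first, only then touch the live datapath), below is a minimal sketch of how a later configuration-change path could combine the helpers introduced here. The wrapper name gve_adjust_config_sketch, its exact ordering, and its error handling are illustrative assumptions and are not part of this patch; only gve_queues_mem_alloc, gve_queues_stop, gve_queues_mem_remove, gve_queues_mem_free, and gve_queues_start come from the diff above.

/* Illustrative sketch only, not part of this commit: one way the split
 * helpers could be combined so the datapath is taken down only after the
 * replacement queue resources already exist.
 */
static int gve_adjust_config_sketch(struct gve_priv *priv,
				    struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
				    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
				    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
	int err;

	/* Allocate resources for the new configuration while the current
	 * queues are still live; failure here leaves the device untouched.
	 */
	err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
				   tx_alloc_cfg, rx_alloc_cfg);
	if (err)
		return err;

	/* Stop the current queues and release the old resources. */
	err = gve_queues_stop(priv);
	if (err) {
		gve_queues_mem_free(priv, qpls_alloc_cfg,
				    tx_alloc_cfg, rx_alloc_cfg);
		return err;
	}
	gve_queues_mem_remove(priv);

	/* Install and start the queues backed by the new memory; on error,
	 * gve_queues_start frees the passed-in memory itself.
	 */
	return gve_queues_start(priv, qpls_alloc_cfg,
				tx_alloc_cfg, rx_alloc_cfg);
}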