Commit a10b563d authored by Jakub Kicinski, committed by David S. Miller

nfp: reuse ring helpers on .ndo_open() path

Ring allocation helpers encapsulate all ring allocation and
initialization steps nicely.  Reuse them on .ndo_open() path.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0ae42dfc
...@@ -2051,6 +2051,13 @@ static void nfp_net_open_stack(struct nfp_net *nn) ...@@ -2051,6 +2051,13 @@ static void nfp_net_open_stack(struct nfp_net *nn)
static int nfp_net_netdev_open(struct net_device *netdev) static int nfp_net_netdev_open(struct net_device *netdev)
{ {
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_ring_set rx = {
.mtu = nn->netdev->mtu,
.dcnt = nn->rxd_cnt,
};
struct nfp_net_ring_set tx = {
.dcnt = nn->txd_cnt,
};
int err, r; int err, r;
if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
...@@ -2075,38 +2082,22 @@ static int nfp_net_netdev_open(struct net_device *netdev) ...@@ -2075,38 +2082,22 @@ static int nfp_net_netdev_open(struct net_device *netdev)
goto err_free_exn; goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
if (!nn->rx_rings) {
err = -ENOMEM;
goto err_free_lsc;
}
nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
GFP_KERNEL);
if (!nn->tx_rings) {
err = -ENOMEM;
goto err_free_rx_rings;
}
for (r = 0; r < nn->num_r_vecs; r++) { for (r = 0; r < nn->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err) if (err)
goto err_cleanup_vec_p; goto err_cleanup_vec_p;
} }
for (r = 0; r < nn->num_tx_rings; r++) {
err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt); nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx);
if (err) if (!nn->rx_rings) {
goto err_free_tx_ring_p; err = -ENOMEM;
goto err_cleanup_vec;
} }
for (r = 0; r < nn->num_rx_rings; r++) {
err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
nn->fl_bufsz, nn->rxd_cnt);
if (err)
goto err_flush_free_rx_ring_p;
err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring); nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx);
if (err) if (!nn->tx_rings) {
goto err_free_rx_ring_p; err = -ENOMEM;
goto err_free_rx_rings;
} }
err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
...@@ -2139,25 +2130,14 @@ static int nfp_net_netdev_open(struct net_device *netdev) ...@@ -2139,25 +2130,14 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0; return 0;
err_free_rings: err_free_rings:
r = nn->num_rx_rings; nfp_net_tx_ring_set_free(nn, &tx);
err_flush_free_rx_ring_p: err_free_rx_rings:
while (r--) { nfp_net_rx_ring_set_free(nn, &rx);
nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); err_cleanup_vec:
err_free_rx_ring_p:
nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
}
r = nn->num_tx_rings;
err_free_tx_ring_p:
while (r--)
nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
r = nn->num_r_vecs; r = nn->num_r_vecs;
err_cleanup_vec_p: err_cleanup_vec_p:
while (r--) while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
kfree(nn->tx_rings);
err_free_rx_rings:
kfree(nn->rx_rings);
err_free_lsc:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn: err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment