Commit 5f449249, authored by Michael Chan, committed by David S. Miller

bnxt_en: Add a set of TX rings to support XDP.

Add logic for an extra set of TX rings for XDP.  If enabled, this
set of TX rings equals the number of RX rings and shares the same
IRQ as the RX ring set.  A new field bp->tx_nr_rings_xdp is added
to keep track of these TX XDP rings.  Adjust all other relevant functions
to handle bp->tx_nr_rings_xdp.
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a960dec9
...@@ -2218,6 +2218,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) ...@@ -2218,6 +2218,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
} }
ring->queue_id = bp->q_info[j].queue_id; ring->queue_id = bp->q_info[j].queue_id;
if (i < bp->tx_nr_rings_xdp)
continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
j++; j++;
} }
...@@ -3042,8 +3044,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) ...@@ -3042,8 +3044,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->tx_nr_rings; i++, j++) { for (i = 0; i < bp->tx_nr_rings; i++, j++) {
bp->tx_ring[i].bnapi = bp->bnapi[j]; bp->tx_ring[i].bnapi = bp->bnapi[j];
bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
bp->tx_ring_map[i] = i; bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
bp->tx_ring[i].txq_index = i; if (i >= bp->tx_nr_rings_xdp)
bp->tx_ring[i].txq_index = i -
bp->tx_nr_rings_xdp;
} }
rc = bnxt_alloc_stats(bp); rc = bnxt_alloc_stats(bp);
...@@ -4966,7 +4970,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp) ...@@ -4966,7 +4970,8 @@ static int bnxt_set_real_num_queues(struct bnxt *bp)
int rc; int rc;
struct net_device *dev = bp->dev; struct net_device *dev = bp->dev;
rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
bp->tx_nr_rings_xdp);
if (rc) if (rc)
return rc; return rc;
...@@ -6582,7 +6587,7 @@ static void bnxt_sp_task(struct work_struct *work) ...@@ -6582,7 +6587,7 @@ static void bnxt_sp_task(struct work_struct *work)
} }
/* Under rtnl_lock */ /* Under rtnl_lock */
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs) int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
{ {
int max_rx, max_tx, tx_sets = 1; int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed; int tx_rings_needed;
...@@ -6602,12 +6607,12 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs) ...@@ -6602,12 +6607,12 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs)
if (max_rx < rx) if (max_rx < rx)
return -ENOMEM; return -ENOMEM;
tx_rings_needed = tx * tx_sets; tx_rings_needed = tx * tx_sets + tx_xdp;
if (max_tx < tx_rings_needed) if (max_tx < tx_rings_needed)
return -ENOMEM; return -ENOMEM;
if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) || if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
tx_rings_needed < (tx * tx_sets)) tx_rings_needed < (tx * tx_sets + tx_xdp))
return -ENOMEM; return -ENOMEM;
return 0; return 0;
} }
...@@ -6788,8 +6793,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) ...@@ -6788,8 +6793,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (bp->flags & BNXT_FLAG_SHARED_RINGS) if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true; sh = true;
rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
bp->rx_nr_rings, tc); tc, bp->tx_nr_rings_xdp);
if (rc) if (rc)
return rc; return rc;
......
...@@ -1026,6 +1026,7 @@ struct bnxt { ...@@ -1026,6 +1026,7 @@ struct bnxt {
int tx_nr_pages; int tx_nr_pages;
int tx_nr_rings; int tx_nr_rings;
int tx_nr_rings_per_tc; int tx_nr_rings_per_tc;
int tx_nr_rings_xdp;
int tx_wake_thresh; int tx_wake_thresh;
int tx_push_thresh; int tx_push_thresh;
...@@ -1203,7 +1204,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); ...@@ -1203,7 +1204,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs); int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp); void bnxt_restore_pf_fw_resources(struct bnxt *bp);
......
...@@ -389,6 +389,7 @@ static int bnxt_set_channels(struct net_device *dev, ...@@ -389,6 +389,7 @@ static int bnxt_set_channels(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev); struct bnxt *bp = netdev_priv(dev);
int req_tx_rings, req_rx_rings, tcs; int req_tx_rings, req_rx_rings, tcs;
bool sh = false; bool sh = false;
int tx_xdp = 0;
int rc = 0; int rc = 0;
if (channel->other_count) if (channel->other_count)
...@@ -413,7 +414,14 @@ static int bnxt_set_channels(struct net_device *dev, ...@@ -413,7 +414,14 @@ static int bnxt_set_channels(struct net_device *dev,
req_tx_rings = sh ? channel->combined_count : channel->tx_count; req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count; req_rx_rings = sh ? channel->combined_count : channel->rx_count;
rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs); if (bp->tx_nr_rings_xdp) {
if (!sh) {
netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
return -EINVAL;
}
tx_xdp = req_rx_rings;
}
rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp);
if (rc) { if (rc) {
netdev_warn(dev, "Unable to allocate the requested rings\n"); netdev_warn(dev, "Unable to allocate the requested rings\n");
return rc; return rc;
...@@ -442,10 +450,10 @@ static int bnxt_set_channels(struct net_device *dev, ...@@ -442,10 +450,10 @@ static int bnxt_set_channels(struct net_device *dev,
bp->rx_nr_rings = channel->rx_count; bp->rx_nr_rings = channel->rx_count;
bp->tx_nr_rings_per_tc = channel->tx_count; bp->tx_nr_rings_per_tc = channel->tx_count;
} }
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
if (tcs > 1) if (tcs > 1)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings; bp->tx_nr_rings + bp->rx_nr_rings;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment