Commit 5a052d62 authored by Sudarsana Reddy Kalluru, committed by David S. Miller

qede: Honor user request for Tx buffers

The driver always allocates the maximal number of Tx buffers (TX_RING_SIZE) irrespective of the actual Tx ring configuration. Size the SW Tx rings and the Tx PBL chain by the requested number of Tx buffers instead, and replace the power-of-two index masking with modulo arithmetic so the producer/consumer indices wrap correctly for any ring size.
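The core of the change is how the SW producer/consumer indices wrap. Below is a minimal userspace sketch of the two schemes (illustrative only; the sizes and variable names here are made up and are not the driver's fields):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int pow2_size = 8;	/* stands in for the fixed TX_RING_SIZE */
		const unsigned int user_size = 6;	/* stands in for txq->num_tx_buffers */
		unsigned int prod = 0, prod_wrapped = 0;

		for (int i = 0; i < 16; i++) {
			/* old scheme: free-running counter, masked on use; only
			 * valid when the SW ring is pow2_size entries long
			 */
			unsigned int idx_mask = prod & (pow2_size - 1);
			/* new scheme: the counter itself wraps at the requested
			 * size, so the ring needs only user_size entries
			 */
			unsigned int idx_mod = prod_wrapped;

			printf("mask idx=%u  modulo idx=%u\n", idx_mask, idx_mod);

			prod++;
			prod_wrapped = (prod_wrapped + 1) % user_size;
		}
		return 0;
	}

With modulo wrapping the SW ring no longer has to be a power of two, so qede_alloc_mem_txq() can allocate exactly the number of entries the user asked for (e.g. via ethtool -G <dev> tx <n>) instead of TX_RING_SIZE.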
Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ba798b5b
@@ -1297,7 +1297,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	}
 	/* Fill the entry in the SW ring and the BDs in the FW ring */
-	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	idx = txq->sw_tx_prod;
 	txq->sw_tx_ring.skbs[idx].skb = skb;
 	first_bd = qed_chain_produce(&txq->tx_pbl);
 	memset(first_bd, 0, sizeof(*first_bd));
@@ -1317,7 +1317,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	/* update the first BD with the actual num BDs */
 	first_bd->data.nbds = 1;
-	txq->sw_tx_prod++;
+	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 	/* 'next page' entries are counted in the producer value */
 	val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
 	txq->tx_db.data.bd_prod = val;
@@ -1351,7 +1351,7 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
 	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
 			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
-	txq->sw_tx_cons++;
+	txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 	txq->sw_tx_ring.skbs[idx].skb = NULL;
 	return 0;
...
@@ -99,7 +99,7 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
 /* Unmap the data and free skb */
 int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
 {
-	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+	u16 idx = txq->sw_tx_cons;
 	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
 	struct eth_tx_1st_bd *first_bd;
 	struct eth_tx_bd *tx_data_bd;
@@ -156,7 +156,7 @@ static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
 				    struct eth_tx_1st_bd *first_bd,
 				    int nbd, bool data_split)
 {
-	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	u16 idx = txq->sw_tx_prod;
 	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
 	struct eth_tx_bd *tx_data_bd;
 	int i, split_bd_len = 0;
@@ -333,8 +333,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 			 struct sw_rx_data *metadata, u16 padding, u16 length)
 {
 	struct qede_tx_queue *txq = fp->xdp_tx;
-	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
 	struct eth_tx_1st_bd *first_bd;
+	u16 idx = txq->sw_tx_prod;
 	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
 		txq->stopped_cnt++;
@@ -363,7 +363,7 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 	txq->sw_tx_ring.xdp[idx].page = metadata->data;
 	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
-	txq->sw_tx_prod++;
+	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 	/* Mark the fastpath for future XDP doorbell */
 	fp->xdp_xmit = 1;
@@ -393,14 +393,14 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
 		qed_chain_consume(&txq->tx_pbl);
-		idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+		idx = txq->sw_tx_cons;
 		dma_unmap_page(&edev->pdev->dev,
 			       txq->sw_tx_ring.xdp[idx].mapping,
 			       PAGE_SIZE, DMA_BIDIRECTIONAL);
 		__free_page(txq->sw_tx_ring.xdp[idx].page);
-		txq->sw_tx_cons++;
+		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 		txq->xmit_pkts++;
 	}
 }
@@ -430,7 +430,7 @@ static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 		bytes_compl += len;
 		pkts_compl++;
-		txq->sw_tx_cons++;
+		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 		txq->xmit_pkts++;
 	}
@@ -1455,7 +1455,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 #endif
 	/* Fill the entry in the SW ring and the BDs in the FW ring */
-	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	idx = txq->sw_tx_prod;
 	txq->sw_tx_ring.skbs[idx].skb = skb;
 	first_bd = (struct eth_tx_1st_bd *)
 		   qed_chain_produce(&txq->tx_pbl);
@@ -1639,7 +1639,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	/* Advance packet producer only before sending the packet since mapping
 	 * of pages may fail.
 	 */
-	txq->sw_tx_prod++;
+	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 	/* 'next page' entries are counted in the producer value */
 	txq->tx_db.data.bd_prod =
...
@@ -1304,12 +1304,12 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 	/* Allocate the parallel driver ring for Tx buffers */
 	if (txq->is_xdp) {
-		size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
 		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
 		if (!txq->sw_tx_ring.xdp)
 			goto err;
 	} else {
-		size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
 		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
 		if (!txq->sw_tx_ring.skbs)
 			goto err;
@@ -1319,7 +1319,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 					    QED_CHAIN_MODE_PBL,
 					    QED_CHAIN_CNT_TYPE_U16,
-					    TX_RING_SIZE,
+					    txq->num_tx_buffers,
 					    sizeof(*p_virt), &txq->tx_pbl);
 	if (rc)
 		goto err;
...