Commit 89e1afc4 authored by Yuval Mintz, committed by David S. Miller

qede: Correct XDP forward unmapping

The driver currently uses dma_unmap_single() with the address it passed
to the device for forwarding, but the XDP transmission buffer was
originally a page allocated for the Rx queue. The address handed to the
device is likely to differ from the page's original mapped address
because of the placement offset.

This difference is going to get even bigger once we support headroom.

Cache the original mapped address of the page, and use it to unmap the
buffer when the completion for the XDP-forwarded packet arrives.
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 10a0176e
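In short: the address programmed into the Tx BD is the page's DMA mapping plus the Rx placement offset, while dma_unmap_page() must be given the address that dma_map_page() originally returned. A minimal sketch of the pattern follows (an illustration under that assumption, not the driver's exact code; dev, page, offset, length and idx stand in for values taken from the surrounding Rx/Tx context):

    /* Rx buffer setup: the whole page is mapped once; this is the address
     * that must later be handed back to dma_unmap_page().
     */
    dma_addr_t mapping = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);

    /* XDP forward: the device sees the data at an offset inside the page;
     * mapping + offset is what gets written into the Tx BD.
     */
    dma_sync_single_for_device(dev, mapping + offset, length, DMA_TO_DEVICE);

    /* Remember the page *and* its original mapping for the completion path */
    txq->sw_tx_ring.xdp[idx].page = page;
    txq->sw_tx_ring.xdp[idx].mapping = mapping;

    /* Tx completion: unmap with the cached mapping, not the BD address */
    dma_unmap_page(dev, txq->sw_tx_ring.xdp[idx].mapping,
                   PAGE_SIZE, DMA_BIDIRECTIONAL);
    __free_page(txq->sw_tx_ring.xdp[idx].page);

The diff below stores exactly this pair in the new struct sw_tx_xdp and switches the completion handler from dma_unmap_single() on the BD address to dma_unmap_page() on the cached mapping.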
@@ -349,6 +349,11 @@ struct sw_tx_bd {
 #define QEDE_TSO_SPLIT_BD	BIT(0)
 };
 
+struct sw_tx_xdp {
+	struct page *page;
+	dma_addr_t mapping;
+};
+
 struct qede_tx_queue {
 	u8 is_xdp;
 	bool is_legacy;
@@ -372,11 +377,11 @@ struct qede_tx_queue {
 #define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))
 
 	/* Regular Tx requires skb + metadata for release purpose,
-	 * while XDP requires only the pages themselves.
+	 * while XDP requires the pages and the mapped address.
 	 */
 	union {
 		struct sw_tx_bd *skbs;
-		struct page **pages;
+		struct sw_tx_xdp *xdp;
 	} sw_tx_ring;
 
 	struct qed_chain tx_pbl;
...
@@ -360,7 +360,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 				   metadata->mapping + padding,
 				   length, PCI_DMA_TODEVICE);
 
-	txq->sw_tx_ring.pages[idx] = metadata->data;
+	txq->sw_tx_ring.xdp[idx].page = metadata->data;
+	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
 	txq->sw_tx_prod++;
 
 	/* Mark the fastpath for future XDP doorbell */
@@ -384,19 +385,19 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
 
 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-	struct eth_tx_1st_bd *bd;
-	u16 hw_bd_cons;
+	u16 hw_bd_cons, idx;
 
 	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
 	barrier();
 
 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
-		bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+		qed_chain_consume(&txq->tx_pbl);
+		idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
 
-		dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
-				 PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
-						  NUM_TX_BDS_MAX]);
+		dma_unmap_page(&edev->pdev->dev,
+			       txq->sw_tx_ring.xdp[idx].mapping,
+			       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		__free_page(txq->sw_tx_ring.xdp[idx].page);
 
 		txq->sw_tx_cons++;
 		txq->xmit_pkts++;
...
@@ -1251,7 +1251,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
 	/* Free the parallel SW ring */
 	if (txq->is_xdp)
-		kfree(txq->sw_tx_ring.pages);
+		kfree(txq->sw_tx_ring.xdp);
 	else
 		kfree(txq->sw_tx_ring.skbs);
 
@@ -1269,9 +1269,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 
 	/* Allocate the parallel driver ring for Tx buffers */
 	if (txq->is_xdp) {
-		size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
-		txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
-		if (!txq->sw_tx_ring.pages)
+		size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
+		if (!txq->sw_tx_ring.xdp)
 			goto err;
 	} else {
 		size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
...