Commit 38413406 authored by Michael Chan's avatar Michael Chan Committed by David S. Miller

bnxt_en: Add support for XDP_TX action.

Add dedicated transmit function and transmit completion handler for
XDP.  The XDP transmit logic and completion logic are different than
regular TX ring.  The TX buffer is recycled back to the RX ring when
it completes.

v3: Improved the buffer recycling scheme for XDP_TX.

v2: Add trace_xdp_exception().
    Add dma_sync.
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Tested-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c6d30e83
...@@ -212,16 +212,7 @@ static bool bnxt_vf_pciid(enum board_idx idx) ...@@ -212,16 +212,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
#define BNXT_CP_DB_IRQ_DIS(db) \ #define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db) writel(DB_CP_IRQ_DIS_FLAGS, db)
static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) const u16 bnxt_lhint_arr[] = {
{
/* Tell compiler to fetch tx indices from memory. */
barrier();
return bp->tx_ring_size -
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
static const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER, TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023, TX_BD_FLAGS_LHINT_512_TO_1023,
TX_BD_FLAGS_LHINT_1024_TO_2047, TX_BD_FLAGS_LHINT_1024_TO_2047,
...@@ -613,8 +604,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, ...@@ -613,8 +604,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
return data; return data;
} }
static inline int bnxt_alloc_rx_data(struct bnxt *bp, int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp) u16 prod, gfp_t gfp)
{ {
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
...@@ -1766,6 +1756,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) ...@@ -1766,6 +1756,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
break; break;
} }
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
void __iomem *db = txr->tx_doorbell;
u16 prod = txr->tx_prod;
/* Sync BD data before updating doorbell */
wmb();
writel(DB_KEY_TX | prod, db);
writel(DB_KEY_TX | prod, db);
}
cpr->cp_raw_cons = raw_cons; cpr->cp_raw_cons = raw_cons;
/* ACK completion ring before freeing tx ring and producing new /* ACK completion ring before freeing tx ring and producing new
* buffers in rx/agg rings to prevent overflowing the completion * buffers in rx/agg rings to prevent overflowing the completion
...@@ -3066,12 +3068,14 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) ...@@ -3066,12 +3068,14 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->tx_ring[i].bnapi = bp->bnapi[j]; bp->tx_ring[i].bnapi = bp->bnapi[j];
bp->bnapi[j]->tx_ring = &bp->tx_ring[i]; bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
if (i >= bp->tx_nr_rings_xdp) if (i >= bp->tx_nr_rings_xdp) {
bp->tx_ring[i].txq_index = i - bp->tx_ring[i].txq_index = i -
bp->tx_nr_rings_xdp; bp->tx_nr_rings_xdp;
else
bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
bp->bnapi[j]->tx_int = bnxt_tx_int; bp->bnapi[j]->tx_int = bnxt_tx_int;
} else {
bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
}
} }
rc = bnxt_alloc_stats(bp); rc = bnxt_alloc_stats(bp);
......
...@@ -514,13 +514,17 @@ struct rx_tpa_end_cmp_ext { ...@@ -514,13 +514,17 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_RX_EVENT 1 #define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2 #define BNXT_AGG_EVENT 2
#define BNXT_TX_EVENT 4
struct bnxt_sw_tx_bd { struct bnxt_sw_tx_bd {
struct sk_buff *skb; struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(mapping); DEFINE_DMA_UNMAP_ADDR(mapping);
u8 is_gso; u8 is_gso;
u8 is_push; u8 is_push;
union {
unsigned short nr_frags; unsigned short nr_frags;
u16 rx_prod;
};
}; };
struct bnxt_sw_rx_bd { struct bnxt_sw_rx_bd {
...@@ -1191,6 +1195,19 @@ struct bnxt { ...@@ -1191,6 +1195,19 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11 #define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64 #define BNXT_MAX_PHY_I2C_RESP_SIZE 64
/* Return the number of free BDs on the TX ring.  Equals
 * bp->tx_ring_size when the ring is completely idle (no BDs in
 * flight), which is how XDP callers detect a fully-drained ring.
 */
static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	/* tx_prod/tx_cons are free-running u16 counters; masking the
	 * difference gives the count of in-use BDs.
	 */
	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data); void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *); void bnxt_set_ring_params(struct bnxt *);
......
...@@ -19,6 +19,65 @@ ...@@ -19,6 +19,65 @@
#include "bnxt.h" #include "bnxt.h"
#include "bnxt_xdp.h" #include "bnxt_xdp.h"
/* Queue one XDP_TX packet on the TX ring using two BDs (a long TX BD
 * plus its extension BD).  The caller must have verified that at least
 * 2 BDs are available (see the tx_avail check in bnxt_rx_xdp()).
 *
 * @rx_prod: RX producer index of the buffer being transmitted; it is
 *	stashed in the software tx_buf so the completion handler
 *	(bnxt_tx_int_xdp()) can recycle the buffer back to the RX ring.
 *
 * Note: this only writes the descriptors and advances tx_prod; the TX
 * doorbell is rung later by the caller's NAPI poll loop (BNXT_TX_EVENT).
 */
static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd_ext *txbd1;
	struct tx_bd *txbd;
	u32 flags;
	u16 prod;
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	/* Remember which RX buffer this BD carries for later recycling. */
	tx_buf->rx_prod = rx_prod;
	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	/* Single-packet long BD: 2 BD count, coalesce completion now,
	 * length hint looked up from the 512-byte-granular table.
	 */
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		(2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	/* opaque is echoed back in the completion record. */
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);
	prod = NEXT_TX(prod);
	/* Extension BD: no TSO/cksum offload metadata for XDP frames. */
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
	txbd1->tx_bd_mss = cpu_to_le32(0);
	txbd1->tx_bd_cfa_action = cpu_to_le32(0);
	txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
}
/* TX completion handler for the dedicated XDP TX ring.
 *
 * @nr_pkts: number of completed packets; each consumed 2 BDs
 *	(long BD + extension BD), so the consumer index advances twice
 *	per packet.
 *
 * Completed buffers are recycled back to the RX ring by ringing the RX
 * doorbell: if the TX ring has fully drained, all buffers are back on
 * the RX ring and the current rx_prod is used; otherwise only buffers
 * up to the last completed packet may be recycled, so the rx_prod that
 * was stashed in that packet's tx_buf (by bnxt_xmit_xdp()) is used.
 */
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	u16 rx_prod;
	int i;
	for (i = 0; i < nr_pkts; i++) {
		/* Track the first BD of the last completed packet. */
		last_tx_cons = tx_cons;
		tx_cons = NEXT_TX(tx_cons);
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
		/* Ring fully drained: safe to advance RX to current prod. */
		rx_prod = rxr->rx_prod;
	} else {
		/* BDs still in flight: only recycle up to the RX index
		 * recorded for the last completed TX packet.
		 */
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		rx_prod = tx_buf->rx_prod;
	}
	writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
}
/* returns the following: /* returns the following:
* true - packet consumed by XDP and new buffer is allocated. * true - packet consumed by XDP and new buffer is allocated.
* false - packet should be passed to the stack. * false - packet should be passed to the stack.
...@@ -27,11 +86,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, ...@@ -27,11 +86,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len, u8 *event) struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{ {
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf; struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev; struct pci_dev *pdev;
struct xdp_buff xdp; struct xdp_buff xdp;
dma_addr_t mapping; dma_addr_t mapping;
void *orig_data; void *orig_data;
u32 tx_avail;
u32 offset; u32 offset;
u32 act; u32 act;
...@@ -39,6 +100,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, ...@@ -39,6 +100,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false; return false;
pdev = bp->pdev; pdev = bp->pdev;
txr = rxr->bnapi->tx_ring;
rx_buf = &rxr->rx_buf_ring[cons]; rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset; offset = bp->rx_offset;
...@@ -54,6 +116,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, ...@@ -54,6 +116,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
act = bpf_prog_run_xdp(xdp_prog, &xdp); act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock(); rcu_read_unlock();
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
* because we may still be transmitting on some BDs.
*/
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
if (orig_data != xdp.data) { if (orig_data != xdp.data) {
offset = xdp.data - xdp.data_hard_start; offset = xdp.data - xdp.data_hard_start;
*data_ptr = xdp.data_hard_start + offset; *data_ptr = xdp.data_hard_start + offset;
...@@ -63,6 +132,20 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, ...@@ -63,6 +132,20 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
case XDP_PASS: case XDP_PASS:
return false; return false;
case XDP_TX:
if (tx_avail < 2) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
*event = BNXT_TX_EVENT;
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
NEXT_RX(rxr->rx_prod));
bnxt_reuse_rx_data(rxr, cons, page);
return true;
default: default:
bpf_warn_invalid_xdp_action(act); bpf_warn_invalid_xdp_action(act);
/* Fall thru */ /* Fall thru */
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#ifndef BNXT_XDP_H #ifndef BNXT_XDP_H
#define BNXT_XDP_H #define BNXT_XDP_H
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len, struct page *page, u8 **data_ptr, unsigned int *len,
u8 *event); u8 *event);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment