Commit 340bf2db authored by Jakub Kicinski

Merge branch 'bnxt_en-prepare-to-support-new-p7-chips'

Michael Chan says:

====================
bnxt_en: Prepare to support new P7 chips

This patchset prepares the driver to support the new P7 chips by
refactoring and modifying the code.  The P7 chip is built on the P5
chip, and many code paths can be modified to support both chips.  The
whole patchset providing basic support for the P7 chips is about 20
patches, so a follow-on patchset will complete the support and add the
new PCI IDs.

The first 8 patches are changes to the backing store logic to support
both chips with mostly common code paths and data structures.  Both
chips require host backing store memory but the relevant firmware APIs
have been modified to make it easier to support new backing store
memory types.

The next 4 patches are changes to TX and RX ring indexing logic and NAPI
logic.  The main changes are to increment the TX and RX producers
unbounded and to do any masking only when needed.  These changes are
needed to support the P7 chips which require additional higher bits in
these producer indices.  The NAPI logic is also slightly modified.

The last patch is a rename of BNXT_FLAG_CHIP_P5 to BNXT_FLAG_P5_PLUS and
other related macro changes to make it clear that the P5_PLUS macro
applies to P5 and newer chips.
====================

Link: https://lore.kernel.org/r/20231120234405.194542-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 46e208e7 1c7fd6ee
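The indexing scheme described in the cover letter reduces to the
following pattern (a minimal sketch for illustration only; struct ring
and ring_idx() are simplified stand-ins for the driver's real ring
structures and its RING_TX()/RING_RX()/DB_RING_IDX() macros, not
actual bnxt_en code):

	/* Free-running index pattern: prod is a u16 that is only ever
	 * incremented and wraps naturally; the power-of-2 ring mask is
	 * applied only at the point of use (software ring array access,
	 * doorbell write).  The high-order bits preserved in the raw
	 * index are what the newer chips' doorbells consume.
	 */
	struct ring {
		void **buf;	/* software ring, (mask + 1) entries */
		u16 mask;	/* ring size - 1, size is a power of 2 */
		u16 prod;	/* free-running producer, never masked */
	};

	static inline u16 ring_idx(const struct ring *r, u16 raw)
	{
		return raw & r->mask;	/* mask only when indexing */
	}

	static void ring_produce(struct ring *r, void *item)
	{
		r->buf[ring_idx(r, r->prod)] = item;
		r->prod++;	/* unbounded increment, wraps at 64K */
	}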
...@@ -254,22 +254,22 @@ static bool bnxt_vf_pciid(enum board_idx idx) ...@@ -254,22 +254,22 @@ static bool bnxt_vf_pciid(enum board_idx idx)
writel(DB_CP_IRQ_DIS_FLAGS, db) writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \ #define BNXT_DB_CQ(db, idx) \
writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell) writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_P5(db, idx) \ #define BNXT_DB_NQ_P5(db, idx) \
bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
(db)->doorbell) (db)->doorbell)
#define BNXT_DB_CQ_ARM(db, idx) \ #define BNXT_DB_CQ_ARM(db, idx) \
writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell) writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
#define BNXT_DB_NQ_ARM_P5(db, idx) \ #define BNXT_DB_NQ_ARM_P5(db, idx) \
bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \
(db)->doorbell) DB_RING_IDX(db, idx), (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_P5(db, idx); BNXT_DB_NQ_P5(db, idx);
else else
BNXT_DB_CQ(db, idx); BNXT_DB_CQ(db, idx);
...@@ -277,7 +277,7 @@ static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) ...@@ -277,7 +277,7 @@ static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
BNXT_DB_NQ_ARM_P5(db, idx); BNXT_DB_NQ_ARM_P5(db, idx);
else else
BNXT_DB_CQ_ARM(db, idx); BNXT_DB_CQ_ARM(db, idx);
...@@ -285,9 +285,9 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) ...@@ -285,9 +285,9 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
RING_CMP(idx), db->doorbell); DB_RING_IDX(db, idx), db->doorbell);
else else
BNXT_DB_CQ(db, idx); BNXT_DB_CQ(db, idx);
} }
...@@ -321,7 +321,7 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) ...@@ -321,7 +321,7 @@ static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{ {
if (!rxr->bnapi->in_reset) { if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true; rxr->bnapi->in_reset = true;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
else else
set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
...@@ -432,9 +432,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -432,9 +432,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb_headlen(skb); len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags; last_frag = skb_shinfo(skb)->nr_frags;
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
tx_buf = &txr->tx_buf_ring[prod]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->skb = skb; tx_buf->skb = skb;
tx_buf->nr_frags = last_frag; tx_buf->nr_frags = last_frag;
...@@ -522,11 +522,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -522,11 +522,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2);
prod = NEXT_TX(prod); prod = NEXT_TX(prod);
tx_push->tx_bd_opaque = txbd->tx_bd_opaque; tx_push->tx_bd_opaque = txbd->tx_bd_opaque;
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
memcpy(txbd, tx_push1, sizeof(*txbd)); memcpy(txbd, tx_push1, sizeof(*txbd));
prod = NEXT_TX(prod); prod = NEXT_TX(prod);
tx_push->doorbell = tx_push->doorbell =
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH |
DB_RING_IDX(&txr->tx_db, prod));
WRITE_ONCE(txr->tx_prod, prod); WRITE_ONCE(txr->tx_prod, prod);
tx_buf->is_push = 1; tx_buf->is_push = 1;
...@@ -568,7 +569,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -568,7 +569,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
prod = NEXT_TX(prod); prod = NEXT_TX(prod);
txbd1 = (struct tx_bd_ext *) txbd1 = (struct tx_bd_ext *)
&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
txbd1->tx_bd_hsize_lflags = lflags; txbd1->tx_bd_hsize_lflags = lflags;
if (skb_is_gso(skb)) { if (skb_is_gso(skb)) {
...@@ -609,7 +610,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -609,7 +610,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod); prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
len = skb_frag_size(frag); len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
...@@ -618,7 +619,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -618,7 +619,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(dma_mapping_error(&pdev->dev, mapping))) if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_dma_error; goto tx_dma_error;
tx_buf = &txr->tx_buf_ring[prod]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_addr_set(tx_buf, mapping, mapping); dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping); txbd->tx_bd_haddr = cpu_to_le64(mapping);
...@@ -667,7 +668,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -667,7 +668,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* start back at beginning and unmap skb */ /* start back at beginning and unmap skb */
prod = txr->tx_prod; prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), DMA_TO_DEVICE); skb_headlen(skb), DMA_TO_DEVICE);
prod = NEXT_TX(prod); prod = NEXT_TX(prod);
...@@ -675,7 +676,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -675,7 +676,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* unmap remaining mapped pages */ /* unmap remaining mapped pages */
for (i = 0; i < last_frag; i++) { for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod); prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[prod]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i]),
DMA_TO_DEVICE); DMA_TO_DEVICE);
...@@ -701,12 +702,12 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, ...@@ -701,12 +702,12 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 cons = txr->tx_cons; u16 cons = txr->tx_cons;
int tx_pkts = 0; int tx_pkts = 0;
while (cons != hw_cons) { while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf; struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb; struct sk_buff *skb;
int j, last; int j, last;
tx_buf = &txr->tx_buf_ring[cons]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
cons = NEXT_TX(cons); cons = NEXT_TX(cons);
skb = tx_buf->skb; skb = tx_buf->skb;
tx_buf->skb = NULL; tx_buf->skb = NULL;
...@@ -730,7 +731,7 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, ...@@ -730,7 +731,7 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
for (j = 0; j < last; j++) { for (j = 0; j < last; j++) {
cons = NEXT_TX(cons); cons = NEXT_TX(cons);
tx_buf = &txr->tx_buf_ring[cons]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
dma_unmap_page( dma_unmap_page(
&pdev->dev, &pdev->dev,
dma_unmap_addr(tx_buf, mapping), dma_unmap_addr(tx_buf, mapping),
...@@ -738,7 +739,7 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, ...@@ -738,7 +739,7 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
DMA_TO_DEVICE); DMA_TO_DEVICE);
} }
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */ /* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb)) if (!bnxt_get_tx_ts_p5(bp, skb))
skb = NULL; skb = NULL;
...@@ -820,8 +821,8 @@ static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, ...@@ -820,8 +821,8 @@ static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp) u16 prod, gfp_t gfp)
{ {
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
dma_addr_t mapping; dma_addr_t mapping;
if (BNXT_RX_PAGE_MODE(bp)) { if (BNXT_RX_PAGE_MODE(bp)) {
...@@ -854,9 +855,10 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) ...@@ -854,9 +855,10 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{ {
u16 prod = rxr->rx_prod; u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
struct bnxt *bp = rxr->bnapi->bp;
struct rx_bd *cons_bd, *prod_bd; struct rx_bd *cons_bd, *prod_bd;
prod_rx_buf = &rxr->rx_buf_ring[prod]; prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
cons_rx_buf = &rxr->rx_buf_ring[cons]; cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data; prod_rx_buf->data = data;
...@@ -864,8 +866,8 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) ...@@ -864,8 +866,8 @@ void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
prod_rx_buf->mapping = cons_rx_buf->mapping; prod_rx_buf->mapping = cons_rx_buf->mapping;
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)];
prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
} }
...@@ -885,7 +887,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, ...@@ -885,7 +887,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 prod, gfp_t gfp) u16 prod, gfp_t gfp)
{ {
struct rx_bd *rxbd = struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf; struct bnxt_sw_rx_agg_bd *rx_agg_buf;
struct page *page; struct page *page;
dma_addr_t mapping; dma_addr_t mapping;
...@@ -902,7 +904,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, ...@@ -902,7 +904,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
__set_bit(sw_prod, rxr->rx_agg_bmap); __set_bit(sw_prod, rxr->rx_agg_bmap);
rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
rx_agg_buf->page = page; rx_agg_buf->page = page;
rx_agg_buf->offset = offset; rx_agg_buf->offset = offset;
...@@ -944,7 +946,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, ...@@ -944,7 +946,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
bool p5_tpa = false; bool p5_tpa = false;
u32 i; u32 i;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true; p5_tpa = true;
for (i = 0; i < agg_bufs; i++) { for (i = 0; i < agg_bufs; i++) {
...@@ -978,13 +980,13 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, ...@@ -978,13 +980,13 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
prod_rx_buf->mapping = cons_rx_buf->mapping; prod_rx_buf->mapping = cons_rx_buf->mapping;
prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
prod_bd->rx_bd_opaque = sw_prod; prod_bd->rx_bd_opaque = sw_prod;
prod = NEXT_RX_AGG(prod); prod = NEXT_RX_AGG(prod);
sw_prod = NEXT_RX_AGG(sw_prod); sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod));
} }
rxr->rx_agg_prod = prod; rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod; rxr->rx_sw_agg_prod = sw_prod;
...@@ -1111,7 +1113,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, ...@@ -1111,7 +1113,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
u32 i, total_frag_len = 0; u32 i, total_frag_len = 0;
bool p5_tpa = false; bool p5_tpa = false;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa) if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa)
p5_tpa = true; p5_tpa = true;
for (i = 0; i < agg_bufs; i++) { for (i = 0; i < agg_bufs; i++) {
...@@ -1266,7 +1268,7 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, ...@@ -1266,7 +1268,7 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp; struct rx_tpa_end_cmp *tpa_end = cmp;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0; return 0;
agg_bufs = TPA_END_AGG_BUFS(tpa_end); agg_bufs = TPA_END_AGG_BUFS(tpa_end);
...@@ -1317,7 +1319,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, ...@@ -1317,7 +1319,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_bd *prod_bd; struct rx_bd *prod_bd;
dma_addr_t mapping; dma_addr_t mapping;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_START_AGG_ID_P5(tpa_start); agg_id = TPA_START_AGG_ID_P5(tpa_start);
agg_id = bnxt_alloc_agg_idx(rxr, agg_id); agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
} else { } else {
...@@ -1326,7 +1328,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, ...@@ -1326,7 +1328,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons = tpa_start->rx_tpa_start_cmp_opaque; cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod; prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons]; cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf = &rxr->rx_buf_ring[prod]; prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)];
tpa_info = &rxr->rx_tpa[agg_id]; tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons || if (unlikely(cons != rxr->rx_next_cons ||
...@@ -1347,7 +1349,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, ...@@ -1347,7 +1349,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
mapping = tpa_info->mapping; mapping = tpa_info->mapping;
prod_rx_buf->mapping = mapping; prod_rx_buf->mapping = mapping;
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping); prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
...@@ -1380,8 +1382,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, ...@@ -1380,8 +1382,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->agg_count = 0; tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod); rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons); cons = RING_RX(bp, NEXT_RX(cons));
rxr->rx_next_cons = NEXT_RX(cons); rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
cons_rx_buf = &rxr->rx_buf_ring[cons]; cons_rx_buf = &rxr->rx_buf_ring[cons];
bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
...@@ -1580,7 +1582,7 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, ...@@ -1580,7 +1582,7 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size = skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type; skb_shinfo(skb)->gso_type = tpa_info->gso_type;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
else else
payload_off = TPA_END_PAYLOAD_OFF(tpa_end); payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
...@@ -1628,7 +1630,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, ...@@ -1628,7 +1630,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL; return NULL;
} }
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
agg_id = TPA_END_AGG_ID_P5(tpa_end); agg_id = TPA_END_AGG_ID_P5(tpa_end);
agg_id = bnxt_lookup_agg_idx(rxr, agg_id); agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
...@@ -1893,7 +1895,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, ...@@ -1893,7 +1895,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO; rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5) && if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n", netdev_warn_once(bp->dev, "RX buffer error %x\n",
rx_err); rx_err);
...@@ -2024,7 +2026,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, ...@@ -2024,7 +2026,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) { RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
u64 ns, ts; u64 ns, ts;
...@@ -2049,7 +2051,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, ...@@ -2049,7 +2051,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
next_rx_no_len: next_rx_no_len:
rxr->rx_prod = NEXT_RX(prod); rxr->rx_prod = NEXT_RX(prod);
rxr->rx_next_cons = NEXT_RX(cons); rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons));
next_rx_no_prod_no_len: next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons; *raw_cons = tmp_raw_cons;
...@@ -2432,7 +2434,7 @@ static int bnxt_async_event_process(struct bnxt *bp, ...@@ -2432,7 +2434,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr; struct bnxt_rx_ring_info *rxr;
u16 grp_idx; u16 grp_idx;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto async_event_process_exit; goto async_event_process_exit;
netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
...@@ -2852,8 +2854,11 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) ...@@ -2852,8 +2854,11 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
for (i = 0; i < cpr->cp_ring_count; i++) { for (i = 0; i < cpr->cp_ring_count; i++) {
struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i];
work_done += __bnxt_poll_work(bp, cpr2, budget - work_done); if (cpr2->had_nqe_notify) {
cpr->has_more_work |= cpr2->has_more_work; work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
cpr->has_more_work |= cpr2->has_more_work;
}
} }
return work_done; return work_done;
} }
...@@ -2871,8 +2876,11 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, ...@@ -2871,8 +2876,11 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
if (cpr2->had_work_done) { if (cpr2->had_work_done) {
db = &cpr2->cp_db; db = &cpr2->cp_db;
bnxt_writeq(bp, db->db_key64 | dbr_type | bnxt_writeq(bp, db->db_key64 | dbr_type |
RING_CMP(cpr2->cp_raw_cons), db->doorbell); DB_RING_IDX(db, cpr2->cp_raw_cons),
db->doorbell);
cpr2->had_work_done = 0; cpr2->had_work_done = 0;
if (dbr_type == DBR_TYPE_CQ_ARMALL)
cpr2->had_nqe_notify = 0;
} }
} }
__bnxt_poll_work_done(bp, bnapi, budget); __bnxt_poll_work_done(bp, bnapi, budget);
...@@ -2931,6 +2939,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) ...@@ -2931,6 +2939,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
idx = BNXT_NQ_HDL_IDX(idx); idx = BNXT_NQ_HDL_IDX(idx);
cpr2 = &cpr->cp_ring_arr[idx]; cpr2 = &cpr->cp_ring_arr[idx];
cpr2->had_nqe_notify = 1;
work_done += __bnxt_poll_work(bp, cpr2, work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done); budget - work_done);
cpr->has_more_work |= cpr2->has_more_work; cpr->has_more_work |= cpr2->has_more_work;
...@@ -3121,20 +3130,20 @@ static void bnxt_free_skbs(struct bnxt *bp) ...@@ -3121,20 +3130,20 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp); bnxt_free_rx_skbs(bp);
} }
static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len) static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
{ {
u8 init_val = mem_init->init_val; u8 init_val = ctxm->init_value;
u16 offset = mem_init->offset; u16 offset = ctxm->init_offset;
u8 *p2 = p; u8 *p2 = p;
int i; int i;
if (!init_val) if (!init_val)
return; return;
if (offset == BNXT_MEM_INVALID_OFFSET) { if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
memset(p, init_val, len); memset(p, init_val, len);
return; return;
} }
for (i = 0; i < len; i += mem_init->size) for (i = 0; i < len; i += ctxm->entry_size)
*(p2 + i + offset) = init_val; *(p2 + i + offset) = init_val;
} }
...@@ -3201,8 +3210,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) ...@@ -3201,8 +3210,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i]) if (!rmem->pg_arr[i])
return -ENOMEM; return -ENOMEM;
if (rmem->mem_init) if (rmem->ctx_mem)
bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i], bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
rmem->page_size); rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) { if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 && if (i == rmem->nr_pages - 2 &&
...@@ -3249,7 +3258,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp) ...@@ -3249,7 +3258,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
int i, j; int i, j;
bp->max_tpa = MAX_TPA; bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!bp->max_tpa_v2) if (!bp->max_tpa_v2)
return 0; return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
...@@ -3264,7 +3273,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp) ...@@ -3264,7 +3273,7 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (!rxr->rx_tpa) if (!rxr->rx_tpa)
return -ENOMEM; return -ENOMEM;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue; continue;
for (j = 0; j < bp->max_tpa; j++) { for (j = 0; j < bp->max_tpa; j++) {
agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
...@@ -3642,7 +3651,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) ...@@ -3642,7 +3651,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
else else
ring->map_idx = i; ring->map_idx = i;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue; continue;
if (i < bp->rx_nr_rings) { if (i < bp->rx_nr_rings) {
...@@ -3956,7 +3965,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp) ...@@ -3956,7 +3965,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1; int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS) if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
num_vnics += bp->rx_nr_rings; num_vnics += bp->rx_nr_rings;
#endif #endif
...@@ -4229,7 +4238,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) ...@@ -4229,7 +4238,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
} }
} }
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_skip_grps; goto vnic_skip_grps;
if (vnic->flags & BNXT_VNIC_RSS_FLAG) if (vnic->flags & BNXT_VNIC_RSS_FLAG)
...@@ -4249,7 +4258,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) ...@@ -4249,7 +4258,7 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
/* Allocate rss table and hash key */ /* Allocate rss table and hash key */
size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
vnic->rss_table_size = size + HW_HASH_KEY_SIZE; vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
...@@ -4359,7 +4368,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, ...@@ -4359,7 +4368,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
int rc; int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
!(bp->flags & BNXT_FLAG_CHIP_P5)) !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return -EOPNOTSUPP; return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
...@@ -4397,7 +4406,7 @@ static void bnxt_init_stats(struct bnxt *bp) ...@@ -4397,7 +4406,7 @@ static void bnxt_init_stats(struct bnxt *bp)
stats = &cpr->stats; stats = &cpr->stats;
rc = bnxt_hwrm_func_qstat_ext(bp, stats); rc = bnxt_hwrm_func_qstat_ext(bp, stats);
if (rc) { if (rc) {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
mask = (1ULL << 48) - 1; mask = (1ULL << 48) - 1;
else else
mask = -1ULL; mask = -1ULL;
...@@ -4677,7 +4686,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) ...@@ -4677,7 +4686,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
bp->bnapi[i] = bnapi; bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i; bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp; bp->bnapi[i]->bp = bp;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr = struct bnxt_cp_ring_info *cpr =
&bp->bnapi[i]->cp_ring; &bp->bnapi[i]->cp_ring;
...@@ -4695,7 +4704,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) ...@@ -4695,7 +4704,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
for (i = 0; i < bp->rx_nr_rings; i++) { for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
rxr->rx_ring_struct.ring_mem.flags = rxr->rx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG; BNXT_RMEM_RING_PTE_FLAG;
rxr->rx_agg_ring_struct.ring_mem.flags = rxr->rx_agg_ring_struct.ring_mem.flags =
...@@ -4728,7 +4737,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) ...@@ -4728,7 +4737,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_napi *bnapi2; struct bnxt_napi *bnapi2;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
txr->tx_ring_struct.ring_mem.flags = txr->tx_ring_struct.ring_mem.flags =
BNXT_RMEM_RING_PTE_FLAG; BNXT_RMEM_RING_PTE_FLAG;
bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
...@@ -4749,7 +4758,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) ...@@ -4749,7 +4758,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
j++; j++;
} }
txr->bnapi = bnapi2; txr->bnapi = bnapi2;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
txr->tx_cpr = &bnapi2->cp_ring; txr->tx_cpr = &bnapi2->cp_ring;
} }
...@@ -5275,7 +5284,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) ...@@ -5275,7 +5284,7 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n; nsegs = (MAX_SKB_FRAGS - n) / n;
} }
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
segs = MAX_TPA_SEGS_P5; segs = MAX_TPA_SEGS_P5;
max_aggs = bp->max_tpa; max_aggs = bp->max_tpa;
} else { } else {
...@@ -5301,7 +5310,7 @@ static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) ...@@ -5301,7 +5310,7 @@ static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return rxr->rx_cpr->cp_ring_struct.fw_ring_id; return rxr->rx_cpr->cp_ring_struct.fw_ring_id;
else else
return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
...@@ -5309,7 +5318,7 @@ static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) ...@@ -5309,7 +5318,7 @@ static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return txr->tx_cpr->cp_ring_struct.fw_ring_id; return txr->tx_cpr->cp_ring_struct.fw_ring_id;
else else
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
...@@ -5319,7 +5328,7 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) ...@@ -5319,7 +5328,7 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{ {
int entries; int entries;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
else else
entries = HW_HASH_INDEX_SIZE; entries = HW_HASH_INDEX_SIZE;
...@@ -5369,7 +5378,7 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp) ...@@ -5369,7 +5378,7 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2; return 2;
...@@ -5415,7 +5424,7 @@ static void ...@@ -5415,7 +5424,7 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic) struct bnxt_vnic_info *vnic)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
bnxt_fill_hw_rss_tbl_p5(bp, vnic); bnxt_fill_hw_rss_tbl_p5(bp, vnic);
else else
bnxt_fill_hw_rss_tbl(bp, vnic); bnxt_fill_hw_rss_tbl(bp, vnic);
...@@ -5440,7 +5449,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) ...@@ -5440,7 +5449,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
struct hwrm_vnic_rss_cfg_input *req; struct hwrm_vnic_rss_cfg_input *req;
int rc; int rc;
if ((bp->flags & BNXT_FLAG_CHIP_P5) || if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ||
vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0; return 0;
...@@ -5605,7 +5614,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) ...@@ -5605,7 +5614,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (rc) if (rc)
return rc; return rc;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
req->default_rx_ring_id = req->default_rx_ring_id =
...@@ -5705,7 +5714,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, ...@@ -5705,7 +5714,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
if (rc) if (rc)
return rc; return rc;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
goto vnic_no_ring_grps; goto vnic_no_ring_grps;
/* map ring groups to this vnic */ /* map ring groups to this vnic */
...@@ -5753,7 +5762,7 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) ...@@ -5753,7 +5762,7 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (!rc) { if (!rc) {
u32 flags = le32_to_cpu(resp->flags); u32 flags = le32_to_cpu(resp->flags);
if (!(bp->flags & BNXT_FLAG_CHIP_P5) && if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
(flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
bp->flags |= BNXT_FLAG_NEW_RSS_CAP; bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
if (flags & if (flags &
...@@ -5764,14 +5773,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) ...@@ -5764,14 +5773,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
* VLAN_STRIP_CAP properly. * VLAN_STRIP_CAP properly.
*/ */
if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
(BNXT_CHIP_P5_THOR(bp) && (BNXT_CHIP_P5(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA; bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2) { if (bp->max_tpa_v2) {
if (BNXT_CHIP_P5_THOR(bp)) if (BNXT_CHIP_P5(bp))
bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
else else
bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
...@@ -5788,7 +5797,7 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) ...@@ -5788,7 +5797,7 @@ static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
int rc; int rc;
u16 i; u16 i;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0; return 0;
rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
...@@ -5821,7 +5830,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) ...@@ -5821,7 +5830,7 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
struct hwrm_ring_grp_free_input *req; struct hwrm_ring_grp_free_input *req;
u16 i; u16 i;
if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return; return;
if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
...@@ -5886,7 +5895,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, ...@@ -5886,7 +5895,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
case HWRM_RING_ALLOC_RX: case HWRM_RING_ALLOC_RX:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
req->length = cpu_to_le32(bp->rx_ring_mask + 1); req->length = cpu_to_le32(bp->rx_ring_mask + 1);
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 flags = 0; u16 flags = 0;
/* Association of rx ring with stats context */ /* Association of rx ring with stats context */
...@@ -5901,7 +5910,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, ...@@ -5901,7 +5910,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
} }
break; break;
case HWRM_RING_ALLOC_AGG: case HWRM_RING_ALLOC_AGG:
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
/* Association of agg ring with rx ring */ /* Association of agg ring with rx ring */
grp_info = &bp->grp_info[ring->grp_idx]; grp_info = &bp->grp_info[ring->grp_idx];
...@@ -5919,7 +5928,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, ...@@ -5919,7 +5928,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
case HWRM_RING_ALLOC_CMPL: case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
req->length = cpu_to_le32(bp->cp_ring_mask + 1); req->length = cpu_to_le32(bp->cp_ring_mask + 1);
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
/* Association of cp ring with nq */ /* Association of cp ring with nq */
grp_info = &bp->grp_info[map_index]; grp_info = &bp->grp_info[map_index];
req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
...@@ -5987,10 +5996,30 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) ...@@ -5987,10 +5996,30 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
} }
} }
static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db,
u32 ring_type)
{
switch (ring_type) {
case HWRM_RING_ALLOC_TX:
db->db_ring_mask = bp->tx_ring_mask;
break;
case HWRM_RING_ALLOC_RX:
db->db_ring_mask = bp->rx_ring_mask;
break;
case HWRM_RING_ALLOC_AGG:
db->db_ring_mask = bp->rx_agg_ring_mask;
break;
case HWRM_RING_ALLOC_CMPL:
case HWRM_RING_ALLOC_NQ:
db->db_ring_mask = bp->cp_ring_mask;
break;
}
}
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
u32 map_idx, u32 xid) u32 map_idx, u32 xid)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (BNXT_PF(bp)) if (BNXT_PF(bp))
db->doorbell = bp->bar1 + DB_PF_OFFSET_P5; db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
else else
...@@ -6026,6 +6055,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, ...@@ -6026,6 +6055,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
break; break;
} }
} }
bnxt_set_db_mask(bp, db, ring_type);
} }
static int bnxt_hwrm_ring_alloc(struct bnxt *bp) static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
...@@ -6034,7 +6064,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) ...@@ -6034,7 +6064,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
int i, rc = 0; int i, rc = 0;
u32 type; u32 type;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = HWRM_RING_ALLOC_NQ; type = HWRM_RING_ALLOC_NQ;
else else
type = HWRM_RING_ALLOC_CMPL; type = HWRM_RING_ALLOC_CMPL;
...@@ -6070,7 +6100,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) ...@@ -6070,7 +6100,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_ring_struct *ring; struct bnxt_ring_struct *ring;
u32 map_idx; u32 map_idx;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr; struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
struct bnxt_napi *bnapi = txr->bnapi; struct bnxt_napi *bnapi = txr->bnapi;
u32 type2 = HWRM_RING_ALLOC_CMPL; u32 type2 = HWRM_RING_ALLOC_CMPL;
...@@ -6108,7 +6138,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) ...@@ -6108,7 +6138,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings) if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
u32 type2 = HWRM_RING_ALLOC_CMPL; u32 type2 = HWRM_RING_ALLOC_CMPL;
...@@ -6221,7 +6251,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) ...@@ -6221,7 +6251,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
} }
} }
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_RX_AGG; type = RING_FREE_REQ_RING_TYPE_RX_AGG;
else else
type = RING_FREE_REQ_RING_TYPE_RX; type = RING_FREE_REQ_RING_TYPE_RX;
...@@ -6248,7 +6278,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) ...@@ -6248,7 +6278,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
*/ */
bnxt_disable_int_sync(bp); bnxt_disable_int_sync(bp);
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
type = RING_FREE_REQ_RING_TYPE_NQ; type = RING_FREE_REQ_RING_TYPE_NQ;
else else
type = RING_FREE_REQ_RING_TYPE_L2_CMPL; type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
...@@ -6315,7 +6345,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) ...@@ -6315,7 +6345,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
cp = le16_to_cpu(resp->alloc_cmpl_rings); cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx); stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp; hw_resc->resv_irqs = cp;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
int rx = hw_resc->resv_rx_rings; int rx = hw_resc->resv_rx_rings;
int tx = hw_resc->resv_tx_rings; int tx = hw_resc->resv_tx_rings;
...@@ -6380,7 +6410,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, ...@@ -6380,7 +6410,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (BNXT_NEW_RM(bp)) { if (BNXT_NEW_RM(bp)) {
enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
enables |= tx_rings + ring_grps ? enables |= tx_rings + ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
...@@ -6396,7 +6426,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, ...@@ -6396,7 +6426,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings); req->num_rx_rings = cpu_to_le16(rx_rings);
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_msix = cpu_to_le16(cp_rings); req->num_msix = cpu_to_le16(cp_rings);
req->num_rsscos_ctxs = req->num_rsscos_ctxs =
...@@ -6431,7 +6461,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, ...@@ -6431,7 +6461,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
enables |= tx_rings + ring_grps ? enables |= tx_rings + ring_grps ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else { } else {
...@@ -6446,7 +6476,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, ...@@ -6446,7 +6476,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
req->num_tx_rings = cpu_to_le16(tx_rings); req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings); req->num_rx_rings = cpu_to_le16(rx_rings);
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
} else { } else {
...@@ -6542,7 +6572,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp) ...@@ -6542,7 +6572,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
{ {
int cp; int cp;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return bnxt_nq_rings_in_use(bp); return bnxt_nq_rings_in_use(bp);
cp = bp->tx_nr_rings + bp->rx_nr_rings; cp = bp->tx_nr_rings + bp->rx_nr_rings;
...@@ -6599,7 +6629,8 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) ...@@ -6599,7 +6629,8 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
bnxt_check_rss_tbl_no_rmgr(bp); bnxt_check_rss_tbl_no_rmgr(bp);
return false; return false;
} }
if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) if ((bp->flags & BNXT_FLAG_RFS) &&
!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
vnic = rx + 1; vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS) if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1; rx <<= 1;
...@@ -6607,9 +6638,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) ...@@ -6607,9 +6638,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp && (hw_resc->resv_hw_ring_grps != grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5))) !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)))
return true; return true;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) && if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq) hw_resc->resv_irqs != nq)
return true; return true;
return false; return false;
...@@ -6631,7 +6662,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp) ...@@ -6631,7 +6662,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_SHARED_RINGS) if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true; sh = true;
if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5)) if ((bp->flags & BNXT_FLAG_RFS) &&
!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
vnic = rx + 1; vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS) if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1; rx <<= 1;
...@@ -6722,7 +6754,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, ...@@ -6722,7 +6754,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
req->flags = cpu_to_le32(flags); req->flags = cpu_to_le32(flags);
...@@ -6744,7 +6776,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, ...@@ -6744,7 +6776,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
else else
...@@ -6963,7 +6995,7 @@ bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, ...@@ -6963,7 +6995,7 @@ bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi,
rc = hwrm_req_send(bp, req); rc = hwrm_req_send(bp, req);
if (rc) if (rc)
return rc; return rc;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return 0; return 0;
} }
return 0; return 0;
...@@ -7000,7 +7032,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) ...@@ -7000,7 +7032,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
if (rc) if (rc)
break; break;
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue; continue;
if (bnapi->rx_ring && bnapi->tx_ring[0]) { if (bnapi->rx_ring && bnapi->tx_ring[0]) {
...@@ -7158,7 +7190,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) ...@@ -7158,7 +7190,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (bp->db_size) if (bp->db_size)
goto func_qcfg_exit; goto func_qcfg_exit;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (BNXT_PF(bp)) if (BNXT_PF(bp))
min_db_offset = DB_PF_OFFSET_P5; min_db_offset = DB_PF_OFFSET_P5;
else else
...@@ -7175,37 +7207,103 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) ...@@ -7175,37 +7207,103 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	return rc;
 }

-static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
-			struct hwrm_func_backing_store_qcaps_output *resp)
-{
-	struct bnxt_mem_init *mem_init;
-	u16 init_mask;
-	u8 init_val;
-	u8 *offset;
-	int i;
-
-	init_val = resp->ctx_kind_initializer;
-	init_mask = le16_to_cpu(resp->ctx_init_mask);
-	offset = &resp->qp_init_offset;
-	mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
-	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
-		mem_init->init_val = init_val;
-		mem_init->offset = BNXT_MEM_INVALID_OFFSET;
-		if (!init_mask)
-			continue;
-		if (i == BNXT_CTX_MEM_INIT_STAT)
-			offset = &resp->stat_init_offset;
-		if (init_mask & (1 << i))
-			mem_init->offset = *offset * 4;
-		else
-			mem_init->init_val = 0;
-	}
-	ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
-}
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
+				      u8 init_val, u8 init_offset,
+				      bool init_mask_set)
+{
+	ctxm->init_value = init_val;
+	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
+	if (init_mask_set)
+		ctxm->init_offset = init_offset * 4;
+	else
+		ctxm->init_value = 0;
+}
+
+static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
+{
+	struct bnxt_ctx_mem_info *ctx = bp->ctx;
+	u16 type;
+
+	for (type = 0; type < ctx_max; type++) {
+		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+		int n = 1;
+
+		if (!ctxm->max_entries)
+			continue;
+
+		if (ctxm->instance_bmap)
+			n = hweight32(ctxm->instance_bmap);
+		ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL);
+		if (!ctxm->pg_info)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+#define BNXT_CTX_INIT_VALID(flags)	\
+	(!!((flags) &			\
+	    FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
+
+static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
+{
+	struct hwrm_func_backing_store_qcaps_v2_output *resp;
+	struct hwrm_func_backing_store_qcaps_v2_input *req;
+	u16 last_valid_type = BNXT_CTX_INV;
+	struct bnxt_ctx_mem_info *ctx;
+	u16 type;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+	if (rc)
+		return rc;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+	bp->ctx = ctx;
+
+	resp = hwrm_req_hold(bp, req);
+
+	for (type = 0; type < BNXT_CTX_V2_MAX; ) {
+		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+		u8 init_val, init_off, i;
+		__le32 *p;
+		u32 flags;
+
+		req->type = cpu_to_le16(type);
+		rc = hwrm_req_send(bp, req);
+		if (rc)
+			goto ctx_done;
+		flags = le32_to_cpu(resp->flags);
+		type = le16_to_cpu(resp->next_valid_type);
+		if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+			continue;
+
+		ctxm->type = le16_to_cpu(resp->type);
+		last_valid_type = ctxm->type;
+		ctxm->entry_size = le16_to_cpu(resp->entry_size);
+		ctxm->flags = flags;
+		ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+		ctxm->entry_multiple = resp->entry_multiple;
+		ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+		ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+		init_val = resp->ctx_init_value;
+		init_off = resp->ctx_init_offset;
+		bnxt_init_ctx_initializer(ctxm, init_val, init_off,
+					  BNXT_CTX_INIT_VALID(flags));
+		ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+					      BNXT_MAX_SPLIT_ENTRY);
+		for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+		     i++, p++)
+			ctxm->split[i] = le32_to_cpu(*p);
+	}
+	if (last_valid_type < BNXT_CTX_V2_MAX)
+		ctx->ctx_arr[last_valid_type].last = true;
+	rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX);
+
+ctx_done:
+	hwrm_req_drop(bp, req);
+	return rc;
+}
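For readers following the refactor, a standalone sketch (plain C with hypothetical names mirroring the driver's fields; not driver code) of the initializer encoding used above: firmware reports the init offset in units of 4 bytes, and a type without an initializer keeps the invalid-offset sentinel with a zero init value.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTX_INIT_INVALID_OFFSET 0xffff	/* sentinel, as in the driver */

struct ctx_mem_type {
	uint8_t init_value;
	uint16_t init_offset;
};

/* firmware reports the offset in units of 4 bytes */
static void init_ctx_initializer(struct ctx_mem_type *ctxm, uint8_t init_val,
				 uint8_t init_offset, bool init_mask_set)
{
	ctxm->init_value = init_val;
	ctxm->init_offset = CTX_INIT_INVALID_OFFSET;
	if (init_mask_set)
		ctxm->init_offset = init_offset * 4;
	else
		ctxm->init_value = 0;
}

int main(void)
{
	struct ctx_mem_type qp;

	init_ctx_initializer(&qp, 0xff, 3, true);
	/* prints: offset 12 bytes, value 0xff */
	printf("offset %u bytes, value 0x%x\n",
	       (unsigned int)qp.init_offset, (unsigned int)qp.init_value);
	return 0;
}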
 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
@@ -7217,6 +7315,9 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
 		return 0;

+	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+		return bnxt_hwrm_func_backing_store_qcaps_v2(bp);
+
 	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
 	if (rc)
 		return rc;
@@ -7224,48 +7325,83 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	resp = hwrm_req_hold(bp, req);
 	rc = hwrm_req_send_silent(bp, req);
 	if (!rc) {
-		struct bnxt_ctx_pg_info *ctx_pg;
+		struct bnxt_ctx_mem_type *ctxm;
 		struct bnxt_ctx_mem_info *ctx;
-		int i, tqm_rings;
+		u8 init_val, init_idx = 0;
+		u16 init_mask;

-		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+		ctx = bp->ctx;
 		if (!ctx) {
-			rc = -ENOMEM;
-			goto ctx_err;
+			ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+			if (!ctx) {
+				rc = -ENOMEM;
+				goto ctx_err;
+			}
+			bp->ctx = ctx;
 		}
-		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
-		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
-		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
-		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
-		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
-		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
-		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
-		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
-		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
-		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
-		ctx->vnic_max_vnic_entries =
-			le16_to_cpu(resp->vnic_max_vnic_entries);
-		ctx->vnic_max_ring_table_entries =
-			le16_to_cpu(resp->vnic_max_ring_table_entries);
-		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
-		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
-		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
-		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
-		ctx->tqm_min_entries_per_ring =
-			le32_to_cpu(resp->tqm_min_entries_per_ring);
-		ctx->tqm_max_entries_per_ring =
-			le32_to_cpu(resp->tqm_max_entries_per_ring);
-		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
-		if (!ctx->tqm_entries_multiple)
-			ctx->tqm_entries_multiple = 1;
-		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
-		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
-		ctx->mrav_num_entries_units =
-			le16_to_cpu(resp->mrav_num_entries_units);
-		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
-		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
-
-		bnxt_init_ctx_initializer(ctx, resp);
+		init_val = resp->ctx_kind_initializer;
+		init_mask = le16_to_cpu(resp->ctx_init_mask);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
+		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
+		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
+		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+		ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
+		ctxm->max_entries = ctxm->vnic_entries +
+			le16_to_cpu(resp->vnic_max_ring_table_entries);
+		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val,
+					  resp->vnic_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
+		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val,
+					  resp->stat_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
+		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
+		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
+		ctxm->entry_multiple = resp->tqm_entries_multiple;
+		if (!ctxm->entry_multiple)
+			ctxm->entry_multiple = 1;
+
+		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
+		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
+		ctxm->mrav_num_entries_units =
+			le16_to_cpu(resp->mrav_num_entries_units);
+		bnxt_init_ctx_initializer(ctxm, init_val,
+					  resp->mrav_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
+		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);

 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
 		if (!ctx->tqm_fp_rings_count)
@@ -7273,16 +7409,11 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;

-		tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
-		ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
-		if (!ctx_pg) {
-			kfree(ctx);
-			rc = -ENOMEM;
-			goto ctx_err;
-		}
-		for (i = 0; i < tqm_rings; i++, ctx_pg++)
-			ctx->tqm_mem[i] = ctx_pg;
-		bp->ctx = ctx;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+		memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
+		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
+
+		rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX);
 	} else {
 		rc = 0;
 	}
@@ -7321,6 +7452,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 	struct hwrm_func_backing_store_cfg_input *req;
 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
 	struct bnxt_ctx_pg_info *ctx_pg;
+	struct bnxt_ctx_mem_type *ctxm;
 	void **__req = (void **)&req;
 	u32 req_len = sizeof(*req);
 	__le32 *num_entries;
@@ -7342,82 +7474,99 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 	req->enables = cpu_to_le32(enables);
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
-		ctx_pg = &ctx->qp_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+		ctx_pg = ctxm->pg_info;
 		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
-		req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
-		req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
-		req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+		req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
+		req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
+		req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->qpc_pg_size_qpc_lvl,
 				      &req->qpc_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
-		ctx_pg = &ctx->srq_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+		ctx_pg = ctxm->pg_info;
 		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
-		req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
-		req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+		req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
+		req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->srq_pg_size_srq_lvl,
 				      &req->srq_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
-		ctx_pg = &ctx->cq_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+		ctx_pg = ctxm->pg_info;
 		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
-		req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
-		req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+		req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
+		req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->cq_pg_size_cq_lvl,
 				      &req->cq_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
-		ctx_pg = &ctx->vnic_mem;
-		req->vnic_num_vnic_entries =
-			cpu_to_le16(ctx->vnic_max_vnic_entries);
+		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+		ctx_pg = ctxm->pg_info;
+		req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
 		req->vnic_num_ring_table_entries =
-			cpu_to_le16(ctx->vnic_max_ring_table_entries);
-		req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
+		req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->vnic_pg_size_vnic_lvl,
 				      &req->vnic_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
-		ctx_pg = &ctx->stat_mem;
-		req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
-		req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+		ctx_pg = ctxm->pg_info;
+		req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
+		req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->stat_pg_size_stat_lvl,
 				      &req->stat_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
-		ctx_pg = &ctx->mrav_mem;
+		u32 units;
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+		ctx_pg = ctxm->pg_info;
 		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
-		if (ctx->mrav_num_entries_units)
-			flags |=
-				FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
-		req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+		units = ctxm->mrav_num_entries_units;
+		if (units) {
+			u32 num_mr, num_ah = ctxm->mrav_av_entries;
+			u32 entries;
+
+			num_mr = ctx_pg->entries - num_ah;
+			entries = ((num_mr / units) << 16) | (num_ah / units);
+			req->mrav_num_entries = cpu_to_le32(entries);
+			flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
+		}
+		req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->mrav_pg_size_mrav_lvl,
 				      &req->mrav_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
-		ctx_pg = &ctx->tim_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+		ctx_pg = ctxm->pg_info;
 		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
-		req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+		req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->tim_pg_size_tim_lvl,
 				      &req->tim_page_dir);
 	}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
 	for (i = 0, num_entries = &req->tqm_sp_num_entries,
 	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
 	     pg_dir = &req->tqm_sp_page_dir,
-	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
+	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP,
+	     ctx_pg = ctxm->pg_info;
 	     i < BNXT_MAX_TQM_RINGS;
+	     ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
 	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
 		if (!(enables & ena))
 			continue;
-		req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
-		ctx_pg = ctx->tqm_mem[i];
+		req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
 		*num_entries = cpu_to_le32(ctx_pg->entries);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
 	}
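The MRAV reservation-split path above packs two counts into one 32-bit field. A standalone sketch of that arithmetic with made-up numbers (the unit size here is illustrative only, not a value the firmware is known to report):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_ah = 1024 * 128;	/* AV entries, as sized in the driver */
	uint32_t num_mr = 1024 * 256;	/* MR entries */
	uint32_t units = 64;		/* illustrative entries-unit size */
	/* MR count in the high 16 bits, AH count in the low 16, both scaled */
	uint32_t encoded = ((num_mr / units) << 16) | (num_ah / units);

	/* prints: mrav_num_entries = 0x10000800 */
	printf("mrav_num_entries = 0x%08x\n", (unsigned int)encoded);
	return 0;
}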
@@ -7441,7 +7590,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,

 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
-				  u8 depth, struct bnxt_mem_init *mem_init)
+				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
 {
 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
 	int rc;
@@ -7479,7 +7628,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
 			rmem->depth = 1;
 			rmem->nr_pages = MAX_CTX_PAGES;
-			rmem->mem_init = mem_init;
+			rmem->ctx_mem = ctxm;
 			if (i == (nr_tbls - 1)) {
 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7494,7 +7643,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
 		if (rmem->nr_pages > 1 || depth)
 			rmem->depth = 1;
-		rmem->mem_init = mem_init;
+		rmem->ctx_mem = ctxm;
 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
 	}
 	return rc;
@@ -7529,38 +7678,131 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
 	ctx_pg->nr_pages = 0;
 }
+static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
+				   struct bnxt_ctx_mem_type *ctxm, u32 entries,
+				   u8 pg_lvl)
+{
+	struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+	int i, rc = 0, n = 1;
+	u32 mem_size;
+
+	if (!ctxm->entry_size || !ctx_pg)
+		return -EINVAL;
+	if (ctxm->instance_bmap)
+		n = hweight32(ctxm->instance_bmap);
+	if (ctxm->entry_multiple)
+		entries = roundup(entries, ctxm->entry_multiple);
+	entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+	mem_size = entries * ctxm->entry_size;
+	for (i = 0; i < n && !rc; i++) {
+		ctx_pg[i].entries = entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
+					    ctxm->init_value ? ctxm : NULL);
+	}
+	return rc;
+}
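bnxt_setup_ctxm_pg_tbls() above sizes a context type by rounding the requested entry count up to the firmware's multiple and then clamping it into the advertised window. A minimal userspace sketch of that arithmetic, with example numbers:

#include <stdint.h>
#include <stdio.h>

static uint32_t size_entries(uint32_t entries, uint32_t multiple,
			     uint32_t min, uint32_t max)
{
	if (multiple)	/* roundup() to the firmware's entry multiple */
		entries = (entries + multiple - 1) / multiple * multiple;
	if (entries < min)	/* clamp_t(u32, entries, min, max) */
		entries = min;
	if (entries > max)
		entries = max;
	return entries;
}

int main(void)
{
	/* example numbers only: request 1000, multiple 64, window [256, 8192] */
	uint32_t entries = size_entries(1000, 64, 256, 8192);

	/* prints: 1024 entries -> 131072 bytes at 128 B each */
	printf("%u entries -> %u bytes at 128 B each\n",
	       (unsigned int)entries, (unsigned int)(entries * 128));
	return 0;
}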
+static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
+					       struct bnxt_ctx_mem_type *ctxm,
+					       bool last)
+{
+	struct hwrm_func_backing_store_cfg_v2_input *req;
+	u32 instance_bmap = ctxm->instance_bmap;
+	int i, j, rc = 0, n = 1;
+	__le32 *p;
+
+	if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+		return 0;
+	if (instance_bmap)
+		n = hweight32(ctxm->instance_bmap);
+	else
+		instance_bmap = 1;
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+	if (rc)
+		return rc;
+	hwrm_req_hold(bp, req);
+	req->type = cpu_to_le16(ctxm->type);
+	req->entry_size = cpu_to_le16(ctxm->entry_size);
+	req->subtype_valid_cnt = ctxm->split_entry_cnt;
+	for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
+		p[i] = cpu_to_le32(ctxm->split[i]);
+	for (i = 0, j = 0; j < n && !rc; i++) {
+		struct bnxt_ctx_pg_info *ctx_pg;
+
+		if (!(instance_bmap & (1 << i)))
+			continue;
+		req->instance = cpu_to_le16(i);
+		ctx_pg = &ctxm->pg_info[j++];
+		if (!ctx_pg->entries)
+			continue;
+		req->num_entries = cpu_to_le32(ctx_pg->entries);
+		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+				      &req->page_size_pbl_level,
+				      &req->page_dir);
+		if (last && j == n)
+			req->flags =
+				cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE);
+		rc = hwrm_req_send(bp, req);
+	}
+	hwrm_req_drop(bp, req);
+	return rc;
+}
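The per-instance loop above visits one pg_info slot per set bit in instance_bmap and flags the final message. A standalone sketch of the same walk, using the GCC/Clang popcount builtin as a stand-in for the kernel's hweight32():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t instance_bmap = 0x2d;			/* example bitmap */
	int n = __builtin_popcount(instance_bmap);	/* hweight32() */
	int i, j;

	for (i = 0, j = 0; j < n; i++) {
		if (!(instance_bmap & (1u << i)))
			continue;
		j++;
		printf("send cfg for instance %d%s\n", i,
		       j == n ? " (last one, set BS_CFG_ALL_DONE)" : "");
	}
	return 0;
}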
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
+{
+	struct bnxt_ctx_mem_info *ctx = bp->ctx;
+	struct bnxt_ctx_mem_type *ctxm;
+	int rc = 0;
+	u16 type;
+
+	for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
+		ctxm = &ctx->ctx_arr[type];
+
+		rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
 void bnxt_free_ctx_mem(struct bnxt *bp)
 {
 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
-	int i;
+	u16 type;

 	if (!ctx)
 		return;

-	if (ctx->tqm_mem[0]) {
-		for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
-			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
-		kfree(ctx->tqm_mem[0]);
-		ctx->tqm_mem[0] = NULL;
+	for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
+		struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+		struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+		int i, n = 1;
+
+		if (!ctx_pg)
+			continue;
+		if (ctxm->instance_bmap)
+			n = hweight32(ctxm->instance_bmap);
+		for (i = 0; i < n; i++)
+			bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]);
+
+		kfree(ctx_pg);
+		ctxm->pg_info = NULL;
 	}

-	bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
-	bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
-	bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
-	bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
-	bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
-	bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
-	bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
 	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
+	kfree(ctx);
+	bp->ctx = NULL;
 }
 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 {
-	struct bnxt_ctx_pg_info *ctx_pg;
+	struct bnxt_ctx_mem_type *ctxm;
 	struct bnxt_ctx_mem_info *ctx;
-	struct bnxt_mem_init *init;
-	u32 mem_size, ena, entries;
-	u32 entries_sp, min;
+	u32 l2_qps, qp1_qps, max_qps;
+	u32 ena, entries_sp, entries;
+	u32 srqs, max_srqs, min;
 	u32 num_mr, num_ah;
 	u32 extra_srqs = 0;
 	u32 extra_qps = 0;
@@ -7577,120 +7819,93 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
 		return 0;

+	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+	l2_qps = ctxm->qp_l2_entries;
+	qp1_qps = ctxm->qp_qp1_entries;
+	max_qps = ctxm->max_entries;
+	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+	srqs = ctxm->srq_l2_entries;
+	max_srqs = ctxm->max_entries;
 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
 		pg_lvl = 2;
-		extra_qps = 65536;
-		extra_srqs = 8192;
+		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
 	}

-	ctx_pg = &ctx->qp_mem;
-	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
-			  extra_qps;
-	if (ctx->qp_entry_size) {
-		mem_size = ctx->qp_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
-		if (rc)
-			return rc;
-	}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps,
+				     pg_lvl);
+	if (rc)
+		return rc;

-	ctx_pg = &ctx->srq_mem;
-	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
-	if (ctx->srq_entry_size) {
-		mem_size = ctx->srq_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
-		if (rc)
-			return rc;
-	}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl);
+	if (rc)
+		return rc;

-	ctx_pg = &ctx->cq_mem;
-	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
-	if (ctx->cq_entry_size) {
-		mem_size = ctx->cq_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
-		if (rc)
-			return rc;
-	}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries +
+				     extra_qps * 2, pg_lvl);
+	if (rc)
+		return rc;

-	ctx_pg = &ctx->vnic_mem;
-	ctx_pg->entries = ctx->vnic_max_vnic_entries +
-			  ctx->vnic_max_ring_table_entries;
-	if (ctx->vnic_entry_size) {
-		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
-		if (rc)
-			return rc;
-	}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+	if (rc)
+		return rc;

-	ctx_pg = &ctx->stat_mem;
-	ctx_pg->entries = ctx->stat_max_entries;
-	if (ctx->stat_entry_size) {
-		mem_size = ctx->stat_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
-		if (rc)
-			return rc;
-	}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1);
+	if (rc)
+		return rc;

 	ena = 0;
 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
 		goto skip_rdma;

-	ctx_pg = &ctx->mrav_mem;
+	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
 	/* 128K extra is needed to accommodate static AH context
 	 * allocation by f/w.
 	 */
-	num_mr = 1024 * 256;
-	num_ah = 1024 * 128;
-	ctx_pg->entries = num_mr + num_ah;
-	if (ctx->mrav_entry_size) {
-		mem_size = ctx->mrav_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
-		if (rc)
-			return rc;
-	}
+	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+	num_ah = min_t(u32, num_mr, 1024 * 128);
+	ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+	if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+		ctxm->mrav_av_entries = num_ah;
+
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
+	if (rc)
+		return rc;
 	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
-	if (ctx->mrav_num_entries_units)
-		ctx_pg->entries =
-			((num_mr / ctx->mrav_num_entries_units) << 16) |
-			(num_ah / ctx->mrav_num_entries_units);

-	ctx_pg = &ctx->tim_mem;
-	ctx_pg->entries = ctx->qp_mem.entries;
-	if (ctx->tim_entry_size) {
-		mem_size = ctx->tim_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
-		if (rc)
-			return rc;
-	}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1);
+	if (rc)
+		return rc;
 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;

 skip_rdma:
-	min = ctx->tqm_min_entries_per_ring;
-	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
-		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
-	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
-	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
-	entries = roundup(entries, ctx->tqm_entries_multiple);
-	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
-	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
-		ctx_pg = ctx->tqm_mem[i];
-		ctx_pg->entries = i ? entries : entries_sp;
-		if (ctx->tqm_entry_size) {
-			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
-			rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
-						    NULL);
-			if (rc)
-				return rc;
-		}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+	min = ctxm->min_entries;
+	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
+		     2 * (extra_qps + qp1_qps) + min;
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2);
+	if (rc)
+		return rc;
+
+	ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+	entries = l2_qps + 2 * (extra_qps + qp1_qps);
+	rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2);
+	if (rc)
+		return rc;
+	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
-	}
 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
-	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
+
+	if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+		rc = bnxt_backing_store_cfg_v2(bp);
+	else
+		rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
 	if (rc) {
 		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
 			   rc);
@@ -7738,7 +7953,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
 	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
 	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

-	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 		u16 max_msix = le16_to_cpu(resp->max_msix);

 		hw_resc->max_nqs = max_msix;
@@ -7767,7 +7982,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 	u8 flags;
 	int rc;

-	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
+	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
 		rc = -ENODEV;
 		goto no_ptp;
 	}
@@ -7799,7 +8014,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 	if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
 		ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
 		ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
-	} else if (bp->flags & BNXT_FLAG_CHIP_P5) {
+	} else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 		ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
 		ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
 	} else {
@@ -7871,6 +8086,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
 	if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
 		bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;

 	flags_ext2 = le32_to_cpu(resp->flags_ext2);
 	if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
@@ -8506,7 +8723,7 @@ static void bnxt_accumulate_all_stats(struct bnxt *bp)
 	int i;

 	/* Chip bug.  Counter intermittently becomes 0. */
-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		ignore_zero = true;

 	for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8700,7 +8917,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
 		return;

 	bnxt_hwrm_clear_vnic_filter(bp);
-	if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
 		/* clear all RSS setting before free vnic ctx */
 		bnxt_hwrm_clear_vnic_rss(bp);
 		bnxt_hwrm_vnic_ctx_free(bp);
@@ -8709,7 +8926,7 @@ static void bnxt_clear_vnic(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_TPA)
 		bnxt_set_tpa(bp, false);
 	bnxt_hwrm_vnic_free(bp);
-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		bnxt_hwrm_vnic_ctx_free(bp);
 }
@@ -8866,7 +9083,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)

 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 {
-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		return __bnxt_setup_vnic_p5(bp, vnic_id);
 	else
 		return __bnxt_setup_vnic(bp, vnic_id);
@@ -8877,7 +9094,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 #ifdef CONFIG_RFS_ACCEL
 	int i, rc = 0;

-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		return 0;

 	for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -9259,7 +9476,7 @@ static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
 {
 	unsigned int cp = bp->hw_resc.max_cp_rings;

-	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
 		cp -= bnxt_get_ulp_msix_num(bp);

 	return cp;
@@ -9269,7 +9486,7 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);

 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
@@ -9285,7 +9502,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
 	unsigned int cp;

 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
 	else
 		return cp - bp->cp_nr_rings;
@@ -9304,7 +9521,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
 	int max_idx, avail_msix;

 	max_idx = bp->total_irqs;
-	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
 		max_idx = min_t(int, bp->total_irqs, max_cp);
 	avail_msix = max_idx - bp->cp_nr_rings;
 	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
@@ -9583,7 +9800,7 @@ static void bnxt_init_napi(struct bnxt *bp)
 	if (bp->flags & BNXT_FLAG_USING_MSIX) {
 		int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;

-		if (bp->flags & BNXT_FLAG_CHIP_P5)
+		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 			poll_fn = bnxt_poll_p5;
 		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
 			cp_nr_rings--;
@@ -10321,8 +10538,6 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 		if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
 			bnxt_ulp_stop(bp);
 		bnxt_free_ctx_mem(bp);
-		kfree(bp->ctx);
-		bp->ctx = NULL;
 		bnxt_dcb_free(bp);
 		rc = bnxt_fw_init_one(bp);
 		if (rc) {
@@ -11304,7 +11519,7 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)

 /* If the chip and firmware support RFS */
 static bool bnxt_rfs_supported(struct bnxt *bp)
 {
-	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
 			return true;
 		return false;
@@ -11325,7 +11540,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
 #ifdef CONFIG_RFS_ACCEL
 	int vnics, max_vnics, max_rss_ctxs;

-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
 		return bnxt_rfs_supported(bp);
 	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
 		return false;
@@ -11427,7 +11642,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
 		update_tpa = true;
 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
 		    (flags & BNXT_FLAG_TPA) == 0 ||
-		    (bp->flags & BNXT_FLAG_CHIP_P5))
+		    (bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
 			re_init = true;
 	}
@@ -11838,8 +12053,7 @@ static void bnxt_timer(struct timer_list *t)
 	if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
 		bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);

-	if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
-	    netif_carrier_ok(dev))
+	if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev))
 		bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);

 bnxt_restart_timer:
@@ -11948,8 +12162,6 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
 	if (pci_is_enabled(bp->pdev))
 		pci_disable_device(bp->pdev);
 	bnxt_free_ctx_mem(bp);
-	kfree(bp->ctx);
-	bp->ctx = NULL;
 }

 static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -12092,7 +12304,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
 {
 	int i;

-	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
 		return;

 	for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -12296,7 +12508,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
 		return -ENOMEM;

 	vnics = 1;
-	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
+	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
+	    BNXT_FLAG_RFS)
 		vnics += rx;

 	tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
@@ -12377,10 +12590,10 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
 {
 	u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);

-	if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
 	    (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
 		return true;
-	if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
 	    (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
 		return true;
 	return false;
@@ -13368,8 +13581,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bp->fw_health = NULL;
 	bnxt_cleanup_pci(bp);
 	bnxt_free_ctx_mem(bp);
-	kfree(bp->ctx);
-	bp->ctx = NULL;
 	kfree(bp->rss_indir_tbl);
 	bp->rss_indir_tbl = NULL;
 	bnxt_free_port_stats(bp);
@@ -13438,7 +13649,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
 			     bnxt_get_ulp_msix_num(bp),
 			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
-	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
 		*max_cp = min_t(int, *max_cp, max_irq);
 	max_ring_grps = hw_resc->max_hw_ring_grps;
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -13447,7 +13658,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	}
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		*max_rx >>= 1;
-	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 		if (*max_cp < (*max_rx + *max_tx)) {
 			*max_rx = *max_cp / 2;
 			*max_tx = *max_rx;
@@ -13795,8 +14006,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (BNXT_PF(bp))
 		bnxt_vpd_read_info(bp);

-	if (BNXT_CHIP_P5(bp)) {
-		bp->flags |= BNXT_FLAG_CHIP_P5;
+	if (BNXT_CHIP_P5_PLUS(bp)) {
+		bp->flags |= BNXT_FLAG_CHIP_P5_PLUS;
 		if (BNXT_CHIP_SR2(bp))
 			bp->flags |= BNXT_FLAG_CHIP_SR2;
 	}
@@ -13861,7 +14072,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		bp->gro_func = bnxt_gro_func_5730x;
 		if (BNXT_CHIP_P4(bp))
 			bp->gro_func = bnxt_gro_func_5731x;
-		else if (BNXT_CHIP_P5(bp))
+		else if (BNXT_CHIP_P5_PLUS(bp))
 			bp->gro_func = bnxt_gro_func_5750x;
 	}
 	if (!BNXT_CHIP_P4_PLUS(bp))
@@ -13969,8 +14180,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bp->fw_health = NULL;
 	bnxt_cleanup_pci(bp);
 	bnxt_free_ctx_mem(bp);
-	kfree(bp->ctx);
-	bp->ctx = NULL;
 	kfree(bp->rss_indir_tbl);
 	bp->rss_indir_tbl = NULL;

@@ -14023,8 +14232,6 @@ static int bnxt_suspend(struct device *device)
 	bnxt_hwrm_func_drv_unrgtr(bp);
 	pci_disable_device(bp->pdev);
 	bnxt_free_ctx_mem(bp);
-	kfree(bp->ctx);
-	bp->ctx = NULL;
 	rtnl_unlock();
 	return rc;
 }
@@ -14121,8 +14328,6 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 	if (pci_is_enabled(pdev))
 		pci_disable_device(pdev);
 	bnxt_free_ctx_mem(bp);
-	kfree(bp->ctx);
-	bp->ctx = NULL;
 	rtnl_unlock();

 	/* Request a slot reset. */
......
@@ -686,10 +686,12 @@ struct nqe_cn {
  */
 #define BNXT_MIN_TX_DESC_CNT	(MAX_SKB_FRAGS + 2)

-#define RX_RING(x)	(((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_RING(bp, x)	(((x) & (bp)->rx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bp, x)	(((x) & (bp)->rx_agg_ring_mask) >>	\
+				 (BNXT_PAGE_SHIFT - 4))
 #define RX_IDX(x)	((x) & (RX_DESC_CNT - 1))

-#define TX_RING(x)	(((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_RING(bp, x)	(((x) & (bp)->tx_ring_mask) >> (BNXT_PAGE_SHIFT - 4))
 #define TX_IDX(x)	((x) & (TX_DESC_CNT - 1))

 #define CP_RING(x)	(((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
@@ -716,11 +718,14 @@ struct nqe_cn {
 #define RX_CMP_TYPE(rxcmp)					\
	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)

-#define NEXT_RX(idx)		(((idx) + 1) & bp->rx_ring_mask)
+#define RING_RX(bp, idx)	((idx) & (bp)->rx_ring_mask)
+#define NEXT_RX(idx)		((idx) + 1)

-#define NEXT_RX_AGG(idx)	(((idx) + 1) & bp->rx_agg_ring_mask)
+#define RING_RX_AGG(bp, idx)	((idx) & (bp)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx)	((idx) + 1)

-#define NEXT_TX(idx)		(((idx) + 1) & bp->tx_ring_mask)
+#define RING_TX(bp, idx)	((idx) & (bp)->tx_ring_mask)
+#define NEXT_TX(idx)		((idx) + 1)

 #define ADV_RAW_CMP(idx, n)	((idx) + (n))
 #define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
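Note the convention these macros now encode, per the cover letter: NEXT_TX()/NEXT_RX() advance the producer without wrapping, and RING_TX()/RING_RX() apply the mask only where a physical ring slot is needed. A tiny standalone sketch with an example ring size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ring_mask = 1024 - 1;	/* example ring size */
	uint32_t prod = 1022;		/* producer near the wrap point */
	int i;

	for (i = 0; i < 4; i++) {
		/* mask only when indexing: RING_TX(bp, prod) */
		printf("raw prod %u -> slot %u\n",
		       (unsigned int)prod, (unsigned int)(prod & ring_mask));
		prod++;			/* NEXT_TX(): no masking here */
	}
	return 0;
}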
@@ -762,13 +767,6 @@ struct bnxt_sw_rx_agg_bd {
 	dma_addr_t mapping;
 };

-struct bnxt_mem_init {
-	u8	init_val;
-	u16	offset;
-#define BNXT_MEM_INVALID_OFFSET	0xffff
-	u16	size;
-};
-
 struct bnxt_ring_mem_info {
 	int			nr_pages;
 	int			page_size;
@@ -778,7 +776,7 @@ struct bnxt_ring_mem_info {
 #define BNXT_RMEM_USE_FULL_PAGE_FLAG	4

 	u16			depth;
-	struct bnxt_mem_init	*mem_init;
+	struct bnxt_ctx_mem_type	*ctx_mem;

 	void			**pg_arr;
 	dma_addr_t		*dma_arr;
@@ -820,8 +818,11 @@ struct bnxt_db_info {
 		u64		db_key64;
 		u32		db_key32;
 	};
+	u32			db_ring_mask;
 };

+#define DB_RING_IDX(db, idx)	((idx) & (db)->db_ring_mask)
+
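DB_RING_IDX() above is where the unbounded producer finally gets masked, just before it is OR'ed into the doorbell key. A standalone sketch with illustrative key and mask values (not real doorbell encodings):

#include <stdint.h>
#include <stdio.h>

struct db_info {
	uint64_t db_key64;	/* illustrative key value */
	uint32_t db_ring_mask;	/* per-ring mask, as in the new field */
};

int main(void)
{
	struct db_info db = { .db_key64 = 0xa000000000000000ull,
			      .db_ring_mask = 2048 - 1 };
	uint32_t prod = 5000;	/* unbounded producer index */
	/* DB_RING_IDX(db, prod) masked into the 64-bit doorbell value */
	uint64_t val = db.db_key64 | (prod & db.db_ring_mask);

	printf("doorbell write: 0x%016llx\n", (unsigned long long)val);
	return 0;
}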
 struct bnxt_tx_ring_info {
 	struct bnxt_napi	*bnapi;
 	struct bnxt_cp_ring_info	*tx_cpr;
@@ -1016,6 +1017,8 @@ struct bnxt_cp_ring_info {
 	u8			had_work_done:1;
 	u8			has_more_work:1;

+	u8			had_nqe_notify:1;
+
 	u8			cp_ring_type;
 	u8			cp_idx;
@@ -1427,7 +1430,7 @@ struct bnxt_test_info {
 };

 #define CHIMP_REG_VIEW_ADDR				\
-	((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
+	((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ? 0x80000000 : 0xb1000000)

 #define BNXT_GRCPF_REG_CHIMP_COMM		0x0
 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER	0x100
@@ -1551,53 +1554,72 @@ do {						\
 		attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K;	\
 } while (0)
+struct bnxt_ctx_mem_type {
+	u16	type;
+	u16	entry_size;
+	u32	flags;
+#define BNXT_CTX_MEM_TYPE_VALID	FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+	u32	instance_bmap;
+	u8	init_value;
+	u8	entry_multiple;
+	u16	init_offset;
+#define	BNXT_CTX_INIT_INVALID_OFFSET	0xffff
+	u32	max_entries;
+	u32	min_entries;
+	u8	last:1;
+	u8	split_entry_cnt;
+#define BNXT_MAX_SPLIT_ENTRY	4
+	union {
+		struct {
+			u32	qp_l2_entries;
+			u32	qp_qp1_entries;
+			u32	qp_fast_qpmd_entries;
+		};
+		u32	srq_l2_entries;
+		u32	cq_l2_entries;
+		u32	vnic_entries;
+		struct {
+			u32	mrav_av_entries;
+			u32	mrav_num_entries_units;
+		};
+		u32	split[BNXT_MAX_SPLIT_ENTRY];
+	};
+	struct bnxt_ctx_pg_info	*pg_info;
+};
+
+#define BNXT_CTX_MRAV_AV_SPLIT_ENTRY	0
+
+#define BNXT_CTX_QP	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNXT_CTX_SRQ	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNXT_CTX_CQ	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNXT_CTX_VNIC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNXT_CTX_STAT	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNXT_CTX_STQM	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNXT_CTX_FTQM	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNXT_CTX_MRAV	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNXT_CTX_TIM	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNXT_CTX_TKC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
+#define BNXT_CTX_RKC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_MTQM	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNXT_CTX_SQDBS	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNXT_CTX_RQDBS	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNXT_CTX_SRQDBS	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNXT_CTX_CQDBS	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNXT_CTX_QTKC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
+#define BNXT_CTX_QRKC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
+#define BNXT_CTX_TBLSC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
+#define BNXT_CTX_XPAR	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+
+#define BNXT_CTX_MAX	(BNXT_CTX_TIM + 1)
+#define BNXT_CTX_V2_MAX	(BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_INV	((u16)-1)
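The union in struct bnxt_ctx_mem_type above lets each context type reuse the same storage for its type-specific counts. A minimal userspace illustration of that overlap (field names trimmed, values arbitrary):

#include <stdint.h>
#include <stdio.h>

struct ctx_mem_type {		/* trimmed-down illustration only */
	union {
		struct {
			uint32_t qp_l2_entries;
			uint32_t qp_qp1_entries;
		};
		uint32_t split[4];	/* generic view of the same bytes */
	};
};

int main(void)
{
	struct ctx_mem_type ctxm = { .qp_l2_entries = 512,
				     .qp_qp1_entries = 64 };

	/* the QP-specific counts read back through the generic view */
	printf("split[0]=%u split[1]=%u\n",
	       (unsigned int)ctxm.split[0], (unsigned int)ctxm.split[1]);
	return 0;
}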
 struct bnxt_ctx_mem_info {
-	u32	qp_max_entries;
-	u16	qp_min_qp1_entries;
-	u16	qp_max_l2_entries;
-	u16	qp_entry_size;
-	u16	srq_max_l2_entries;
-	u32	srq_max_entries;
-	u16	srq_entry_size;
-	u16	cq_max_l2_entries;
-	u32	cq_max_entries;
-	u16	cq_entry_size;
-	u16	vnic_max_vnic_entries;
-	u16	vnic_max_ring_table_entries;
-	u16	vnic_entry_size;
-	u32	stat_max_entries;
-	u16	stat_entry_size;
-	u16	tqm_entry_size;
-	u32	tqm_min_entries_per_ring;
-	u32	tqm_max_entries_per_ring;
-	u32	mrav_max_entries;
-	u16	mrav_entry_size;
-	u16	tim_entry_size;
-	u32	tim_max_entries;
-	u16	mrav_num_entries_units;
-	u8	tqm_entries_multiple;
 	u8	tqm_fp_rings_count;

 	u32	flags;
 	#define BNXT_CTX_FLAG_INITED	0x01
-
-	struct bnxt_ctx_pg_info qp_mem;
-	struct bnxt_ctx_pg_info srq_mem;
-	struct bnxt_ctx_pg_info cq_mem;
-	struct bnxt_ctx_pg_info vnic_mem;
-	struct bnxt_ctx_pg_info stat_mem;
-	struct bnxt_ctx_pg_info mrav_mem;
-	struct bnxt_ctx_pg_info tim_mem;
-	struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
-
-#define BNXT_CTX_MEM_INIT_QP	0
-#define BNXT_CTX_MEM_INIT_SRQ	1
-#define BNXT_CTX_MEM_INIT_CQ	2
-#define BNXT_CTX_MEM_INIT_VNIC	3
-#define BNXT_CTX_MEM_INIT_STAT	4
-#define BNXT_CTX_MEM_INIT_MRAV	5
-#define BNXT_CTX_MEM_INIT_MAX	6
-	struct bnxt_mem_init	mem_init[BNXT_CTX_MEM_INIT_MAX];
+	struct bnxt_ctx_mem_type	ctx_arr[BNXT_CTX_V2_MAX];
 };
 enum bnxt_health_severity {
@@ -1837,7 +1859,7 @@ struct bnxt {
 	atomic_t		intr_sem;

 	u32			flags;
-	#define BNXT_FLAG_CHIP_P5	0x1
+	#define BNXT_FLAG_CHIP_P5_PLUS	0x1
 	#define BNXT_FLAG_VF		0x2
 	#define BNXT_FLAG_LRO		0x4
 #ifdef CONFIG_INET
@@ -1891,21 +1913,21 @@ struct bnxt {
 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
 #define BNXT_RX_PAGE_MODE(bp)	((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
 #define BNXT_SUPPORTS_TPA(bp)	(!BNXT_CHIP_TYPE_NITRO_A0(bp) &&	\
-				 (!((bp)->flags & BNXT_FLAG_CHIP_P5) ||	\
+				 (!((bp)->flags & BNXT_FLAG_CHIP_P5_PLUS) ||\
				  (bp)->max_tpa_v2) && !is_kdump_kernel())
 #define BNXT_RX_JUMBO_MODE(bp)	((bp)->flags & BNXT_FLAG_JUMBO)

 #define BNXT_CHIP_SR2(bp)			\
	((bp)->chip_num == CHIP_NUM_58818)

-#define BNXT_CHIP_P5_THOR(bp)			\
+#define BNXT_CHIP_P5(bp)			\
	((bp)->chip_num == CHIP_NUM_57508 ||	\
	 (bp)->chip_num == CHIP_NUM_57504 ||	\
	 (bp)->chip_num == CHIP_NUM_57502)

 /* Chip class phase 5 */
-#define BNXT_CHIP_P5(bp)			\
-	(BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
+#define BNXT_CHIP_P5_PLUS(bp)			\
+	(BNXT_CHIP_P5(bp) || BNXT_CHIP_SR2(bp))

 /* Chip class phase 4.x */
 #define BNXT_CHIP_P4(bp)			\
@@ -1916,7 +1938,7 @@ struct bnxt {
	 !BNXT_CHIP_TYPE_NITRO_A0(bp)))

 #define BNXT_CHIP_P4_PLUS(bp)			\
-	(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+	(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))

 	struct bnxt_aux_priv	*aux_priv;
 	struct bnxt_en_dev	*edev;
@@ -2059,6 +2081,7 @@ struct bnxt {
 	#define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED	BIT_ULL(33)
 	#define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP		BIT_ULL(34)
 	#define BNXT_FW_CAP_PRE_RESV_VNICS		BIT_ULL(35)
+	#define BNXT_FW_CAP_BACKING_STORE_V2		BIT_ULL(36)

 	u32			fw_dbg_cap;
@@ -2339,10 +2362,11 @@ static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
 static inline void bnxt_db_write_relaxed(struct bnxt *bp,
					 struct bnxt_db_info *db, u32 idx)
 {
-	if (bp->flags & BNXT_FLAG_CHIP_P5) {
-		bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+		bnxt_writeq_relaxed(bp, db->db_key64 | DB_RING_IDX(db, idx),
+				    db->doorbell);
 	} else {
-		u32 db_val = db->db_key32 | idx;
+		u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);

 		writel_relaxed(db_val, db->doorbell);
 		if (bp->flags & BNXT_FLAG_DOUBLE_DB)
@@ -2354,10 +2378,11 @@ static inline void bnxt_db_write_relaxed(struct bnxt *bp,
 static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
				 u32 idx)
 {
-	if (bp->flags & BNXT_FLAG_CHIP_P5) {
-		bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+		bnxt_writeq(bp, db->db_key64 | DB_RING_IDX(db, idx),
+			    db->doorbell);
 	} else {
-		u32 db_val = db->db_key32 | idx;
+		u32 db_val = db->db_key32 | DB_RING_IDX(db, idx);

 		writel(db_val, db->doorbell);
 		if (bp->flags & BNXT_FLAG_DOUBLE_DB)
......
@@ -469,8 +469,6 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
 		}
 		bnxt_cancel_reservations(bp, false);
 		bnxt_free_ctx_mem(bp);
-		kfree(bp->ctx);
-		bp->ctx = NULL;
 		break;
 	}
 	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
@@ -741,7 +739,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
 	}

 	/* earlier devices present as an array of raw bytes */
-	if (!BNXT_CHIP_P5(bp)) {
+	if (!BNXT_CHIP_P5_PLUS(bp)) {
 		dim = 0;
 		i = 0;
 		bits *= 3;	/* array of 3 version components */
@@ -761,7 +759,7 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
 			goto exit;
 		bnxt_copy_from_nvm_data(&ver, data, bits, bytes);

-		if (BNXT_CHIP_P5(bp)) {
+		if (BNXT_CHIP_P5_PLUS(bp)) {
 			*nvm_cfg_ver <<= 8;
 			*nvm_cfg_ver |= ver.vu8;
 		} else {
@@ -781,7 +779,7 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
 	if (!strlen(buf))
 		return 0;

-	if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
 	    (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
 	     !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
 		return 0;
@@ -1007,7 +1005,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
 	if (rc)
 		return rc;

-	if (BNXT_CHIP_P5(bp)) {
+	if (BNXT_CHIP_P5_PLUS(bp)) {
 		rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
 		if (rc)
 			return rc;
......
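The first devlink hunk drops the open-coded kfree(bp->ctx) after bnxt_free_ctx_mem(), which implies the backing store refactor moves ownership of bp->ctx into the free routine itself. A hedged sketch of that shape (the real helper's internals differ):

	void bnxt_free_ctx_mem(struct bnxt *bp)
	{
		struct bnxt_ctx_mem_info *ctx = bp->ctx;

		if (!ctx)
			return;

		/* ... release the backing store tables referenced by ctx ... */

		kfree(ctx);
		bp->ctx = NULL;
	}

Centralizing the free means no caller has to remember the kfree()/NULL pair.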
...@@ -511,7 +511,7 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) ...@@ -511,7 +511,7 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{ {
if (BNXT_SUPPORTS_TPA(bp)) { if (BNXT_SUPPORTS_TPA(bp)) {
if (bp->max_tpa_v2) { if (bp->max_tpa_v2) {
if (BNXT_CHIP_P5_THOR(bp)) if (BNXT_CHIP_P5(bp))
return BNXT_NUM_TPA_RING_STATS_P5; return BNXT_NUM_TPA_RING_STATS_P5;
return BNXT_NUM_TPA_RING_STATS_P5_SR2; return BNXT_NUM_TPA_RING_STATS_P5_SR2;
} }
...@@ -1322,7 +1322,7 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev) ...@@ -1322,7 +1322,7 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{ {
struct bnxt *bp = netdev_priv(dev); struct bnxt *bp = netdev_priv(dev);
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5); return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
return HW_HASH_INDEX_SIZE; return HW_HASH_INDEX_SIZE;
} }
...@@ -3943,7 +3943,7 @@ static int bnxt_run_loopback(struct bnxt *bp) ...@@ -3943,7 +3943,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
int rc; int rc;
cpr = &rxr->bnapi->cp_ring; cpr = &rxr->bnapi->cp_ring;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
cpr = rxr->rx_cpr; cpr = rxr->rx_cpr;
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh); pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size); skb = netdev_alloc_skb(bp->dev, pkt_size);
......
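The ethtool hunks show both halves of the rename: the P5-only test BNXT_CHIP_P5_THOR(bp) becomes BNXT_CHIP_P5(bp), while checks meant to cover P5 and newer become BNXT_CHIP_P5_PLUS(bp) or BNXT_FLAG_CHIP_P5_PLUS. Roughly, and assuming a BNXT_CHIP_P7() test purely for illustration (the exact bnxt.h definitions may differ):

	/* Sketch of the naming split after the rename */
	#define BNXT_CHIP_P5_PLUS(bp)	(BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp))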
...@@ -650,7 +650,7 @@ static int bnxt_map_ptp_regs(struct bnxt *bp) ...@@ -650,7 +650,7 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
int rc, i; int rc, i;
reg_arr = ptp->refclk_regs; reg_arr = ptp->refclk_regs;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (BNXT_CHIP_P5(bp)) {
rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN); rc = bnxt_map_regs(bp, reg_arr, 2, BNXT_PTP_GRC_WIN);
if (rc) if (rc)
return rc; return rc;
...@@ -967,7 +967,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) ...@@ -967,7 +967,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
rc = err; rc = err;
goto out; goto out;
} }
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (BNXT_CHIP_P5(bp)) {
spin_lock_bh(&ptp->ptp_lock); spin_lock_bh(&ptp->ptp_lock);
bnxt_refclk_read(bp, NULL, &ptp->current_time); bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time); WRITE_ONCE(ptp->old_time, ptp->current_time);
......
...@@ -536,7 +536,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) ...@@ -536,7 +536,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
if (rc) if (rc)
return rc; return rc;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
vf_ring_grps = 0; vf_ring_grps = 0;
} else { } else {
...@@ -565,7 +565,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) ...@@ -565,7 +565,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->min_l2_ctxs = cpu_to_le16(min); req->min_l2_ctxs = cpu_to_le16(min);
req->min_vnics = cpu_to_le16(min); req->min_vnics = cpu_to_le16(min);
req->min_stat_ctx = cpu_to_le16(min); req->min_stat_ctx = cpu_to_le16(min);
if (!(bp->flags & BNXT_FLAG_CHIP_P5)) if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
req->min_hw_ring_grps = cpu_to_le16(min); req->min_hw_ring_grps = cpu_to_le16(min);
} else { } else {
vf_cp_rings /= num_vfs; vf_cp_rings /= num_vfs;
...@@ -602,7 +602,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) ...@@ -602,7 +602,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
req->max_stat_ctx = cpu_to_le16(vf_stat_ctx); req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps); req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req->max_rsscos_ctx = cpu_to_le16(vf_rss); req->max_rsscos_ctx = cpu_to_le16(vf_rss);
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
req->max_msix = cpu_to_le16(vf_msix / num_vfs); req->max_msix = cpu_to_le16(vf_msix / num_vfs);
hwrm_req_hold(bp, req); hwrm_req_hold(bp, req);
...@@ -630,7 +630,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) ...@@ -630,7 +630,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
le16_to_cpu(req->min_rsscos_ctx) * n; le16_to_cpu(req->min_rsscos_ctx) * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n; hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n; hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
hw_resc->max_nqs -= vf_msix; hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs; rc = pf->active_vfs;
......
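For the MSI-X accounting in the SR-IOV hunks above, the NQs left over after the PF's own use are split evenly among the VFs and later subtracted from hw_resc->max_nqs. With purely hypothetical numbers: if hw_resc->max_nqs is 128, the PF has 32 NQ rings in use, and num_vfs is 8, then vf_msix = 96 and each VF is offered 96 / 8 = 12 MSI-X vectors.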
...@@ -42,7 +42,7 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) ...@@ -42,7 +42,7 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
for (i = 0; i < num_msix; i++) { for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector; ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i; ent[i].ring_idx = idx + i;
if (bp->flags & BNXT_FLAG_CHIP_P5) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
ent[i].db_offset = DB_PF_OFFSET_P5; ent[i].db_offset = DB_PF_OFFSET_P5;
if (BNXT_VF(bp)) if (BNXT_VF(bp))
ent[i].db_offset = DB_VF_OFFSET_P5; ent[i].db_offset = DB_VF_OFFSET_P5;
......
...@@ -42,12 +42,12 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, ...@@ -42,12 +42,12 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
/* fill up the first buffer */ /* fill up the first buffer */
prod = txr->tx_prod; prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
tx_buf->nr_frags = num_frags; tx_buf->nr_frags = num_frags;
if (xdp) if (xdp)
tx_buf->page = virt_to_head_page(xdp->data); tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
flags = (len << TX_BD_LEN_SHIFT) | flags = (len << TX_BD_LEN_SHIFT) |
((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
bnxt_lhint_arr[len >> 9]; bnxt_lhint_arr[len >> 9];
...@@ -67,10 +67,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, ...@@ -67,10 +67,10 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
WRITE_ONCE(txr->tx_prod, prod); WRITE_ONCE(txr->tx_prod, prod);
/* first fill up the first buffer */ /* first fill up the first buffer */
frag_tx_buf = &txr->tx_buf_ring[prod]; frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
frag_tx_buf->page = skb_frag_page(frag); frag_tx_buf->page = skb_frag_page(frag);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
frag_len = skb_frag_size(frag); frag_len = skb_frag_size(frag);
frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
...@@ -139,8 +139,8 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) ...@@ -139,8 +139,8 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (!budget) if (!budget)
return; return;
while (tx_cons != tx_hw_cons) { while (RING_TX(bp, tx_cons) != tx_hw_cons) {
tx_buf = &txr->tx_buf_ring[tx_cons]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
if (tx_buf->action == XDP_REDIRECT) { if (tx_buf->action == XDP_REDIRECT) {
struct pci_dev *pdev = bp->pdev; struct pci_dev *pdev = bp->pdev;
...@@ -160,7 +160,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) ...@@ -160,7 +160,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
frags = tx_buf->nr_frags; frags = tx_buf->nr_frags;
for (j = 0; j < frags; j++) { for (j = 0; j < frags; j++) {
tx_cons = NEXT_TX(tx_cons); tx_cons = NEXT_TX(tx_cons);
tx_buf = &txr->tx_buf_ring[tx_cons]; tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
page_pool_recycle_direct(rxr->page_pool, tx_buf->page); page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
} }
} else { } else {
......
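The XDP hunks apply the same unbounded-index scheme to the TX path: tx_prod and tx_cons count up freely, and RING_TX(bp, idx) masks only at the point of buffer-array access. A minimal sketch, assuming a tx_ring_mask field of ring_size - 1 (names illustrative, not the driver's exact definitions):

	#define NEXT_TX(idx)		((idx) + 1)			/* producer counts up unbounded */
	#define RING_TX(bp, idx)	((idx) & (bp)->tx_ring_mask)	/* mask only when indexing */

	tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];	/* masked access into the ring */
	prod = NEXT_TX(prod);				/* the index itself never wraps */
	WRITE_ONCE(txr->tx_prod, prod);

Note the loop condition while (RING_TX(bp, tx_cons) != tx_hw_cons) compares the masked consumer against the hardware consumer, since the completion ring reports a bounded index.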