Commit ec4d8e7c authored by Michael Chan, committed by David S. Miller

bnxt_en: Add TPA ID mapping logic for 57500 chips.

The new TPA feature on 57500 supports a larger number of concurrent TPAs
(up to 1024) divided among the functions.  We need to add some logic to
map the hardware TPA ID to a software index that keeps track of each TPA
in progress.  A 1:1 direct mapping without translation would be too
wasteful as we would have to allocate 1024 TPA structures for each RX
ring on each PCI function.
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bfcd8d79
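
The mapping this patch implements can be modeled outside the kernel. The sketch below is illustrative only: the bmap_* helpers and bmap_first_zero() are simplified userspace stand-ins for the kernel bitmap API (test_bit(), __set_bit(), __clear_bit(), find_first_zero_bit()), and main() is a hypothetical test driver, not driver code. The idea it demonstrates is the one described above: hardware hands out aggregation IDs from a 1024-entry space shared across functions, each ring keeps only MAX_TPA_P5 (256) software slots, the low 8 bits of the hardware ID are tried first, any free slot is used on collision, and agg_id_tbl records the assignment so later completions for the same hardware ID land in the right slot.

/*
 * Illustrative userspace model of the TPA ID mapping added by this
 * commit.  Constants mirror bnxt.h; everything else is a simplified
 * stand-in, not driver code.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TPA_P5	256			/* software slots per RX ring */
#define MAX_TPA_P5_MASK	(MAX_TPA_P5 - 1)
#define HW_AGG_IDS	1024			/* hardware aggregation ID space */
#define LONG_BITS	(sizeof(unsigned long) * CHAR_BIT)
#define BMAP_LONGS	((MAX_TPA_P5 + LONG_BITS - 1) / LONG_BITS)

struct tpa_idx_map {
	uint16_t agg_id_tbl[HW_AGG_IDS];	/* hw ID -> sw slot */
	unsigned long agg_idx_bmap[BMAP_LONGS];	/* sw slots in use */
};

static int bmap_test(const unsigned long *b, unsigned int n)
{
	return (b[n / LONG_BITS] >> (n % LONG_BITS)) & 1UL;
}

static void bmap_set(unsigned long *b, unsigned int n)
{
	b[n / LONG_BITS] |= 1UL << (n % LONG_BITS);
}

static void bmap_clear(unsigned long *b, unsigned int n)
{
	b[n / LONG_BITS] &= ~(1UL << (n % LONG_BITS));
}

/* Simplified stand-in for the kernel's find_first_zero_bit(). */
static unsigned int bmap_first_zero(const unsigned long *b, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (!bmap_test(b, i))
			return i;
	return nbits;
}

/*
 * Same shape as bnxt_alloc_agg_idx(): try the low bits of the hardware
 * ID, fall back to any free slot on collision, and remember the
 * assignment for later lookups.
 */
static uint16_t alloc_agg_idx(struct tpa_idx_map *map, uint16_t agg_id)
{
	uint16_t idx = agg_id & MAX_TPA_P5_MASK;

	if (bmap_test(map->agg_idx_bmap, idx))
		idx = bmap_first_zero(map->agg_idx_bmap, MAX_TPA_P5);
	bmap_set(map->agg_idx_bmap, idx);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static uint16_t lookup_agg_idx(const struct tpa_idx_map *map, uint16_t agg_id)
{
	return map->agg_id_tbl[agg_id];
}

static void free_agg_idx(struct tpa_idx_map *map, uint16_t idx)
{
	bmap_clear(map->agg_idx_bmap, idx);
}

int main(void)
{
	static struct tpa_idx_map map;	/* zero-initialized, like kzalloc() */
	uint16_t a, b;

	/* Hardware IDs 3 and 259 share the same low 8 bits. */
	a = alloc_agg_idx(&map, 3);	/* takes slot 3 */
	b = alloc_agg_idx(&map, 259);	/* collides, falls back to slot 0 */
	printf("hw 3 -> sw %u, hw 259 -> sw %u\n", (unsigned)a, (unsigned)b);

	free_agg_idx(&map, lookup_agg_idx(&map, 259));
	free_agg_idx(&map, lookup_agg_idx(&map, 3));
	return 0;
}

Note that the driver itself never frees IDs one at a time on teardown: when the rings are flushed, bnxt_free_rx_skbs() simply clears the whole bitmap (see the memset in the diff below).
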
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1152,6 +1152,33 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 	rxr->rx_next_cons = 0xffff;
 }
 
+static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+	u16 idx = agg_id & MAX_TPA_P5_MASK;
+
+	if (test_bit(idx, map->agg_idx_bmap))
+		idx = find_first_zero_bit(map->agg_idx_bmap,
+					  BNXT_AGG_IDX_BMAP_SIZE);
+	__set_bit(idx, map->agg_idx_bmap);
+	map->agg_id_tbl[agg_id] = idx;
+	return idx;
+}
+
+static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+	__clear_bit(idx, map->agg_idx_bmap);
+}
+
+static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+	return map->agg_id_tbl[agg_id];
+}
+
 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			   struct rx_tpa_start_cmp *tpa_start,
 			   struct rx_tpa_start_cmp_ext *tpa_start1)
@@ -1162,10 +1189,12 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	struct rx_bd *prod_bd;
 	dma_addr_t mapping;
 
-	if (bp->flags & BNXT_FLAG_CHIP_P5)
+	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 		agg_id = TPA_START_AGG_ID_P5(tpa_start);
-	else
+		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+	} else {
 		agg_id = TPA_START_AGG_ID(tpa_start);
+	}
 	cons = tpa_start->rx_tpa_start_cmp_opaque;
 	prod = rxr->rx_prod;
 	cons_rx_buf = &rxr->rx_buf_ring[cons];
@@ -1445,6 +1474,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 
 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
 		agg_id = TPA_END_AGG_ID_P5(tpa_end);
+		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
 		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
 		tpa_info = &rxr->rx_tpa[agg_id];
 		if (unlikely(agg_bufs != tpa_info->agg_count)) {
@@ -1454,6 +1484,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		}
 		tpa_info->agg_count = 0;
 		*event |= BNXT_AGG_EVENT;
+		bnxt_free_agg_idx(rxr, agg_id);
 		idx = agg_id;
 		gro = !!(bp->flags & BNXT_FLAG_GRO);
 	} else {
@@ -1560,6 +1591,7 @@ static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
 	struct bnxt_tpa_info *tpa_info;
 
+	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
 	tpa_info = &rxr->rx_tpa[agg_id];
 	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
 	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
@@ -2383,6 +2415,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+		struct bnxt_tpa_idx_map *map;
 		int j;
 
 		if (rxr->rx_tpa) {
@@ -2453,6 +2486,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
 			__free_page(rxr->rx_page);
 			rxr->rx_page = NULL;
 		}
+		map = rxr->rx_tpa_idx_map;
+		if (map)
+			memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
 	}
 }
@@ -2548,6 +2584,8 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
 	for (i = 0; i < bp->rx_nr_rings; i++) {
 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 
+		kfree(rxr->rx_tpa_idx_map);
+		rxr->rx_tpa_idx_map = NULL;
 		if (rxr->rx_tpa) {
 			kfree(rxr->rx_tpa[0].agg_arr);
 			rxr->rx_tpa[0].agg_arr = NULL;
@@ -2586,6 +2624,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
 			return -ENOMEM;
 		for (j = 1; j < bp->max_tpa; j++)
 			rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+					      GFP_KERNEL);
+		if (!rxr->rx_tpa_idx_map)
+			return -ENOMEM;
 	}
 	return 0;
 }
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -555,6 +555,7 @@ struct nqe_cn {
 
 #define MAX_TPA		64
 #define MAX_TPA_P5	256
+#define MAX_TPA_P5_MASK	(MAX_TPA_P5 - 1)
 #define MAX_TPA_SEGS_P5	0x3f
 
 #if (BNXT_PAGE_SHIFT == 16)
@@ -841,6 +842,13 @@ struct bnxt_tpa_info {
 	struct rx_agg_cmp *agg_arr;
 };
 
+#define BNXT_AGG_IDX_BMAP_SIZE	(MAX_TPA_P5 / BITS_PER_LONG)
+
+struct bnxt_tpa_idx_map {
+	u16		agg_id_tbl[1024];
+	unsigned long	agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
+};
+
 struct bnxt_rx_ring_info {
 	struct bnxt_napi *bnapi;
 	u16			rx_prod;
@@ -868,6 +876,7 @@ struct bnxt_rx_ring_info {
 	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 
 	struct bnxt_tpa_info	*rx_tpa;
+	struct bnxt_tpa_idx_map *rx_tpa_idx_map;
 
 	struct bnxt_ring_struct	rx_ring_struct;
 	struct bnxt_ring_struct	rx_agg_ring_struct;
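A sizing note on the struct above, which quantifies the commit message's "too wasteful" claim: struct bnxt_tpa_idx_map costs each ring 1024 x 2 bytes of table plus a 256-bit bitmap, about 2 KB. Assuming the typical MAX_SKB_FRAGS of 17 and a 16-byte struct rx_agg_cmp (both assumptions, not stated in the patch), each bnxt_tpa_info's aggregation array alone is roughly 272 bytes, so a 1:1 scheme tracking all 1024 hardware IDs would need around 272 KB of agg_arr storage per ring, versus about 68 KB plus the 2 KB map with the 256-slot indirection.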