Commit 6fe19886 authored by Michael Chan, committed by David S. Miller

bnxt_en: Refactor bnxt_ring_struct.

Move the DMA page table and vmem fields in bnxt_ring_struct to a new
bnxt_ring_mem_info struct.  This will allow context memory management
for a new device to re-use some of the existing infrastructure.
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 74706afa
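
The change is mechanical: the page-table/DMA/vmem bookkeeping moves out of bnxt_ring_struct into an embedded bnxt_ring_mem_info, and bnxt_alloc_ring()/bnxt_free_ring() take that struct instead of the ring. As a sketch of the resulting layout, condensed from the diff below (the elision markers and the field comments are mine, not part of the patch):

	struct bnxt_ring_mem_info {
		int		nr_pages;	/* number of HW ring pages */
		int		page_size;	/* bytes per ring page */
		void		**pg_arr;	/* CPU addresses of the pages */
		dma_addr_t	*dma_arr;	/* DMA addresses of the pages */
		/* ... page table fields (pg_tbl, pg_tbl_map) elided ... */
		int		vmem_size;	/* optional SW ring size */
		void		**vmem;		/* optional SW ring */
	};

	struct bnxt_ring_struct {
		struct bnxt_ring_mem_info	ring_mem;
		u16	fw_ring_id;	/* Ring id filled by Chimp FW */
		/* ... */
	};

Callers that used to pass the whole ring now pass the embedded member, e.g. bnxt_free_ring(bp, &ring->ring_mem), which is what lets context memory management for a new device reuse the same alloc/free helpers without needing a bnxt_ring_struct at all.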
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2202,60 +2202,60 @@ static void bnxt_free_skbs(struct bnxt *bp)
 	bnxt_free_rx_skbs(bp);
 }
 
-static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 {
 	struct pci_dev *pdev = bp->pdev;
 	int i;
 
-	for (i = 0; i < ring->nr_pages; i++) {
-		if (!ring->pg_arr[i])
+	for (i = 0; i < rmem->nr_pages; i++) {
+		if (!rmem->pg_arr[i])
 			continue;
-		dma_free_coherent(&pdev->dev, ring->page_size,
-				  ring->pg_arr[i], ring->dma_arr[i]);
-		ring->pg_arr[i] = NULL;
+		dma_free_coherent(&pdev->dev, rmem->page_size,
+				  rmem->pg_arr[i], rmem->dma_arr[i]);
+		rmem->pg_arr[i] = NULL;
 	}
-	if (ring->pg_tbl) {
-		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
-				  ring->pg_tbl, ring->pg_tbl_map);
-		ring->pg_tbl = NULL;
+	if (rmem->pg_tbl) {
+		dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
+				  rmem->pg_tbl, rmem->pg_tbl_map);
+		rmem->pg_tbl = NULL;
 	}
-	if (ring->vmem_size && *ring->vmem) {
-		vfree(*ring->vmem);
-		*ring->vmem = NULL;
+	if (rmem->vmem_size && *rmem->vmem) {
+		vfree(*rmem->vmem);
+		*rmem->vmem = NULL;
 	}
 }
 
-static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 {
-	int i;
 	struct pci_dev *pdev = bp->pdev;
+	int i;
 
-	if (ring->nr_pages > 1) {
-		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
-						  ring->nr_pages * 8,
-						  &ring->pg_tbl_map,
+	if (rmem->nr_pages > 1) {
+		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
+						  rmem->nr_pages * 8,
+						  &rmem->pg_tbl_map,
 						  GFP_KERNEL);
-		if (!ring->pg_tbl)
+		if (!rmem->pg_tbl)
 			return -ENOMEM;
 	}
 
-	for (i = 0; i < ring->nr_pages; i++) {
-		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
-						     ring->page_size,
-						     &ring->dma_arr[i],
+	for (i = 0; i < rmem->nr_pages; i++) {
+		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+						     rmem->page_size,
+						     &rmem->dma_arr[i],
 						     GFP_KERNEL);
-		if (!ring->pg_arr[i])
+		if (!rmem->pg_arr[i])
 			return -ENOMEM;
-		if (ring->nr_pages > 1)
-			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+		if (rmem->nr_pages > 1)
+			rmem->pg_tbl[i] = cpu_to_le64(rmem->dma_arr[i]);
 	}
 
-	if (ring->vmem_size) {
-		*ring->vmem = vzalloc(ring->vmem_size);
-		if (!(*ring->vmem))
+	if (rmem->vmem_size) {
+		*rmem->vmem = vzalloc(rmem->vmem_size);
+		if (!(*rmem->vmem))
 			return -ENOMEM;
 	}
 	return 0;
@@ -2285,10 +2285,10 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 		rxr->rx_agg_bmap = NULL;
 
 		ring = &rxr->rx_ring_struct;
-		bnxt_free_ring(bp, ring);
+		bnxt_free_ring(bp, &ring->ring_mem);
 
 		ring = &rxr->rx_agg_ring_struct;
-		bnxt_free_ring(bp, ring);
+		bnxt_free_ring(bp, &ring->ring_mem);
 	}
 }
@@ -2315,7 +2315,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 		if (rc < 0)
 			return rc;
 
-		rc = bnxt_alloc_ring(bp, ring);
+		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
 		if (rc)
 			return rc;
@@ -2323,7 +2323,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 			u16 mem_size;
 
 			ring = &rxr->rx_agg_ring_struct;
-			rc = bnxt_alloc_ring(bp, ring);
+			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
 			if (rc)
 				return rc;
@@ -2366,7 +2366,7 @@ static void bnxt_free_tx_rings(struct bnxt *bp)
 		ring = &txr->tx_ring_struct;
-		bnxt_free_ring(bp, ring);
+		bnxt_free_ring(bp, &ring->ring_mem);
 	}
 }
@@ -2397,7 +2397,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 		ring = &txr->tx_ring_struct;
-		rc = bnxt_alloc_ring(bp, ring);
+		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
 		if (rc)
 			return rc;
@@ -2450,7 +2450,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
 		cpr = &bnapi->cp_ring;
 		ring = &cpr->cp_ring_struct;
-		bnxt_free_ring(bp, ring);
+		bnxt_free_ring(bp, &ring->ring_mem);
 	}
 }
@@ -2471,7 +2471,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 		cpr = &bnapi->cp_ring;
 		ring = &cpr->cp_ring_struct;
-		rc = bnxt_alloc_ring(bp, ring);
+		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
 		if (rc)
 			return rc;
@@ -2489,6 +2489,7 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_ring_mem_info *rmem;
 		struct bnxt_cp_ring_info *cpr;
 		struct bnxt_rx_ring_info *rxr;
 		struct bnxt_tx_ring_info *txr;
@@ -2499,31 +2500,34 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
 		cpr = &bnapi->cp_ring;
 		ring = &cpr->cp_ring_struct;
-		ring->nr_pages = bp->cp_nr_pages;
-		ring->page_size = HW_CMPD_RING_SIZE;
-		ring->pg_arr = (void **)cpr->cp_desc_ring;
-		ring->dma_arr = cpr->cp_desc_mapping;
-		ring->vmem_size = 0;
+		rmem = &ring->ring_mem;
+		rmem->nr_pages = bp->cp_nr_pages;
+		rmem->page_size = HW_CMPD_RING_SIZE;
+		rmem->pg_arr = (void **)cpr->cp_desc_ring;
+		rmem->dma_arr = cpr->cp_desc_mapping;
+		rmem->vmem_size = 0;
 
 		rxr = bnapi->rx_ring;
 		if (!rxr)
 			goto skip_rx;
 
 		ring = &rxr->rx_ring_struct;
-		ring->nr_pages = bp->rx_nr_pages;
-		ring->page_size = HW_RXBD_RING_SIZE;
-		ring->pg_arr = (void **)rxr->rx_desc_ring;
-		ring->dma_arr = rxr->rx_desc_mapping;
-		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
-		ring->vmem = (void **)&rxr->rx_buf_ring;
+		rmem = &ring->ring_mem;
+		rmem->nr_pages = bp->rx_nr_pages;
+		rmem->page_size = HW_RXBD_RING_SIZE;
+		rmem->pg_arr = (void **)rxr->rx_desc_ring;
+		rmem->dma_arr = rxr->rx_desc_mapping;
+		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+		rmem->vmem = (void **)&rxr->rx_buf_ring;
 
 		ring = &rxr->rx_agg_ring_struct;
-		ring->nr_pages = bp->rx_agg_nr_pages;
-		ring->page_size = HW_RXBD_RING_SIZE;
-		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
-		ring->dma_arr = rxr->rx_agg_desc_mapping;
-		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
-		ring->vmem = (void **)&rxr->rx_agg_ring;
+		rmem = &ring->ring_mem;
+		rmem->nr_pages = bp->rx_agg_nr_pages;
+		rmem->page_size = HW_RXBD_RING_SIZE;
+		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+		rmem->dma_arr = rxr->rx_agg_desc_mapping;
+		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+		rmem->vmem = (void **)&rxr->rx_agg_ring;
 
 skip_rx:
 		txr = bnapi->tx_ring;
@@ -2531,12 +2535,13 @@ static void bnxt_init_ring_struct(struct bnxt *bp)
 			continue;
 
 		ring = &txr->tx_ring_struct;
-		ring->nr_pages = bp->tx_nr_pages;
-		ring->page_size = HW_RXBD_RING_SIZE;
-		ring->pg_arr = (void **)txr->tx_desc_ring;
-		ring->dma_arr = txr->tx_desc_mapping;
-		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
-		ring->vmem = (void **)&txr->tx_buf_ring;
+		rmem = &ring->ring_mem;
+		rmem->nr_pages = bp->tx_nr_pages;
+		rmem->page_size = HW_RXBD_RING_SIZE;
+		rmem->pg_arr = (void **)txr->tx_desc_ring;
+		rmem->dma_arr = txr->tx_desc_mapping;
+		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+		rmem->vmem = (void **)&txr->tx_buf_ring;
 	}
 }
@@ -2546,8 +2551,8 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
 	u32 prod;
 	struct rx_bd **rx_buf_ring;
 
-	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
-	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
 		int j;
 		struct rx_bd *rxbd;
@@ -4343,19 +4348,20 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 	int rc = 0, err = 0;
 	struct hwrm_ring_alloc_input req = {0};
 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
 	struct bnxt_ring_grp_info *grp_info;
 	u16 ring_id;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
 
 	req.enables = 0;
-	if (ring->nr_pages > 1) {
-		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+	if (rmem->nr_pages > 1) {
+		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
 		/* Page size is in log2 units */
 		req.page_size = BNXT_PAGE_SHIFT;
 		req.page_tbl_depth = 1;
 	} else {
-		req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
+		req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
 	}
 	req.fbo = 0;
 	/* Association of ring index with doorbell index and MSIX number */
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -577,7 +577,7 @@ struct bnxt_sw_rx_agg_bd {
 	dma_addr_t mapping;
 };
 
-struct bnxt_ring_struct {
+struct bnxt_ring_mem_info {
 	int			nr_pages;
 	int			page_size;
 	void			**pg_arr;
@@ -588,6 +588,10 @@ struct bnxt_ring_struct {
 	int			vmem_size;
 	void			**vmem;
+};
+
+struct bnxt_ring_struct {
+	struct bnxt_ring_mem_info	ring_mem;
 
 	u16			fw_ring_id;	/* Ring id filled by Chimp FW */
 	union {