Commit 5c051207 authored by Geetha sowjanya, committed by David S. Miller

octeontx2-pf: cn10k: Use runtime allocated LMTLINE region

The current driver uses a static LMTST region allocated by firmware.
This memory gets populated as PF/VF BAR2, and the RVU PF/VF driver
ioremaps it as device memory for NIX/NPA operations. Since the region
is mapped as device memory, we see performance degradation. To address
this issue, this patch implements runtime memory allocation: the RVU
PF/VF allocates memory during device probe and shares the base address
with the RVU AF, which then configures the LMT MAP table accordingly.
Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 893ae972
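At a high level, the patch replaces the firmware-provided BAR2 ioremap with a qmem allocation plus an LMTST_TBL_SETUP mailbox request. The following is a condensed sketch of the new, shared PF/VF init path, assembled from the hunks below with error handling and the non-CN10K fallback trimmed; it is not the verbatim driver code.

int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
	struct lmtst_tbl_setup_req *req;
	int qcount = pfvf->hw.max_queues;
	int err;

	/* LMT line budget: NPA needs TX + RX + XDP lines per queue,
	 * NIX needs TX * 32 lines for burst SQE flush.
	 */
	pfvf->npa_lmt_lines = qcount * 3;
	pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
	pfvf->nix_lmt_size  = LMT_BURST_SIZE * LMT_LINE_SIZE;

	/* Allocate the LMTLINE region from regular memory at probe time
	 * instead of ioremapping the static BAR2 region.
	 */
	err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
			 LMT_LINE_SIZE);
	if (err)
		return err;
	pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;

	/* Share the region's IOVA with RVU AF so it can program the
	 * LMT MAP table for this PF/VF.
	 */
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
	req->use_local_lmt_region = true;
	req->lmt_iova = (u64)pfvf->dync_lmt->iova;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	return err;
}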
@@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = {
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
};
int cn10k_pf_lmtst_init(struct otx2_nic *pf)
int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
int size, num_lines;
u64 base;
if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
pf->hw_ops = &otx2_hw_ops;
struct lmtst_tbl_setup_req *req;
int qcount, err;
if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
pfvf->hw_ops = &otx2_hw_ops;
return 0;
}
pf->hw_ops = &cn10k_hw_ops;
base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
(MBOX_SIZE * (pf->total_vfs + 1));
size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
(MBOX_SIZE * (pf->total_vfs + 1));
pf->hw.lmt_base = ioremap(base, size);
pfvf->hw_ops = &cn10k_hw_ops;
qcount = pfvf->hw.max_queues;
/* LMTST lines allocation
* qcount = num_online_cpus();
* NPA = TX + RX + XDP.
* NIX = TX * 32 (For Burst SQE flush).
*/
pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
pfvf->npa_lmt_lines = qcount * 3;
pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
if (!pf->hw.lmt_base) {
dev_err(pf->dev, "Unable to map PF LMTST region\n");
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
/* FIXME: Get the num of LMTST lines from LMT table */
pf->tot_lmt_lines = size / LMT_LINE_SIZE;
num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
pf->hw.tx_queues;
/* Number of LMT lines per SQ queues */
pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
req->use_local_lmt_region = true;
int cn10k_vf_lmtst_init(struct otx2_nic *vf)
{
int size, num_lines;
if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
vf->hw_ops = &otx2_hw_ops;
return 0;
err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
LMT_LINE_SIZE);
if (err) {
mutex_unlock(&pfvf->mbox.lock);
return err;
}
pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
req->lmt_iova = (u64)pfvf->dync_lmt->iova;
vf->hw_ops = &cn10k_hw_ops;
size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
PCI_MBOX_BAR_NUM),
size);
if (!vf->hw.lmt_base) {
dev_err(vf->dev, "Unable to map VF LMTST region\n");
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
vf->tot_lmt_lines = size / LMT_LINE_SIZE;
/* LMTST lines per SQ */
num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
vf->hw.tx_queues;
vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
EXPORT_SYMBOL(cn10k_vf_lmtst_init);
EXPORT_SYMBOL(cn10k_lmtst_init);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
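To put concrete numbers on the line accounting in cn10k_lmtst_init() above, here is an illustrative example; the queue count is hypothetical and not taken from the patch.

	/* Example, assuming hw.max_queues = 8:
	 *   NPA lines: 8 * 3  = 24    (TX + RX + XDP)
	 *   NIX lines: 8 * 32 = 256   (LMT_BURST_SIZE lines per SQ)
	 *   total    : 280 lines * LMT_LINE_SIZE (128 B) = 35840 B = 35 KB
	 */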
@@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
struct otx2_snd_queue *sq;
sq = &pfvf->qset.sq[qidx];
sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
(qidx * pfvf->nix_lmt_size));
sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
@@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
struct otx2_nic *pfvf = dev;
int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
u64 val = 0, tar_addr = 0;
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
val = (lmt_id & 0x7FF);
val = (sq->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
......
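For reference, the two words built in cn10k_sqe_flush() decompose as follows for a single (non-burst) LMTST. This is a sketch derived only from the field comments above; the instruction sequence that actually issues the LMTST flush is omitted.

	u64 val = 0, tar_addr = 0;

	val = sq->lmt_id & 0x7FF;                    /* val[10:0]  = LMT_ID               */
	                                             /* val[15:12] = LMTSTs in burst - 1  */
	tar_addr = sq->io_addr;                      /* LMTST target address              */
	tar_addr |= (((size / 16) - 1) & 0x7) << 4;  /* tar_addr[6:4] = 128-bit words - 1 */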
@@ -12,8 +12,7 @@
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_pf_lmtst_init(struct otx2_nic *pf);
int cn10k_vf_lmtst_init(struct otx2_nic *vf);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
......
@@ -218,8 +218,8 @@ struct otx2_hw {
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
void __iomem *lmt_base;
#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
u64 *lmt_base;
u64 *npa_lmt_base;
u64 *nix_lmt_base;
};
@@ -363,8 +363,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
/* LMTST Lines info */
struct qmem *dync_lmt;
u16 tot_lmt_lines;
u16 nix_lmt_lines;
u16 npa_lmt_lines;
u32 nix_lmt_size;
struct otx2_ptp *ptp;
......
@@ -1533,10 +1533,10 @@ int otx2_open(struct net_device *netdev)
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
/* Reserve LMT lines for NPA AURA batch free */
pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
pf->hw.npa_lmt_base = pf->hw.lmt_base;
/* Reserve LMT lines for NIX TX */
pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
(NIX_LMTID_BASE * LMT_LINE_SIZE));
pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
(pf->npa_lmt_lines * LMT_LINE_SIZE));
}
err = otx2_init_hw_resources(pf);
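With the reservations made in otx2_open() above, the runtime-allocated region is carved up roughly as sketched below; sizes follow the comments in cn10k_lmtst_init(), with qcount standing in for hw.max_queues.

	/* hw.lmt_base (== dync_lmt->base)
	 * +--------------------------------------+ <- hw.npa_lmt_base
	 * | npa_lmt_lines = qcount * 3 lines     |    NPA aura batch free
	 * | of LMT_LINE_SIZE (128 B) each        |
	 * +--------------------------------------+ <- hw.nix_lmt_base
	 * | qcount * LMT_BURST_SIZE (32) lines   |    NIX TX, 32 lines per SQ
	 * | of 128 B each                        |
	 * +--------------------------------------+
	 */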
@@ -2526,7 +2526,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
err = cn10k_pf_lmtst_init(pf);
err = cn10k_lmtst_init(pf);
if (err)
goto err_detach_rsrc;
@@ -2630,8 +2630,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
if (hw->lmt_base)
iounmap(hw->lmt_base);
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@@ -2772,9 +2772,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
if (pf->hw.lmt_base)
iounmap(pf->hw.lmt_base);
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
......
@@ -83,6 +83,7 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;
......
@@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
err = cn10k_vf_lmtst_init(vf);
err = cn10k_lmtst_init(vf);
if (err)
goto err_detach_rsrc;
@@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
err_detach_rsrc:
if (hw->lmt_base)
iounmap(hw->lmt_base);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
if (vf->hw.lmt_base)
iounmap(vf->hw.lmt_base);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
......