Commit f2de576d authored by Hamad Kadmany, committed by Kalle Valo

wil6210: set dma mask to reflect device capability

The device supports 48-bit addresses; reflect that by setting the DMA mask accordingly.
Signed-off-by: Hamad Kadmany <qca_hkadmany@qca.qualcomm.com>
Signed-off-by: Maya Erez <qca_merez@qca.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 4aa2d31f
@@ -211,6 +211,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		dev_err(dev, "wil_if_alloc failed: %d\n", rc);
		return rc;
	}
	wil->pdev = pdev;
	pci_set_drvdata(pdev, wil);
	/* rollback to if_free */
@@ -224,6 +225,21 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	}
	/* rollback to err_plat */
	/* device supports 48bit addresses */
	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (rc) {
		dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(dev,
				"dma_set_mask_and_coherent(32) failed: %d\n",
				rc);
			goto err_plat;
		}
	} else {
		wil->use_extended_dma_addr = 1;
	}
	rc = pci_enable_device(pdev);
	if (rc) {
		wil_err(wil,
......
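Taken out of the probe context, the mask setup added above follows a common pattern: try the widest mask the device supports, fall back to 32 bits if the platform rejects it, and remember which one stuck. The sketch below is only an illustration of that pattern under the commit's assumptions, not driver source; wil_set_dma_mask and the bool out-parameter are hypothetical, while dma_set_mask_and_coherent() and DMA_BIT_MASK() are the standard kernel APIs used in the hunk.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper mirroring the probe-time logic above: prefer the
 * 48-bit mask the device supports, fall back to 32 bits, and report via
 * *extended whether 48-bit addressing is in effect.
 */
static int wil_set_dma_mask(struct device *dev, bool *extended)
{
	int rc;

	*extended = false;

	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (!rc) {
		*extended = true;
		return 0;
	}
	dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);

	/* platform cannot do 48-bit DMA; try the 32-bit mask instead */
	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(dev, "dma_set_mask_and_coherent(32) failed: %d\n", rc);

	return rc;
}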
@@ -107,13 +107,28 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using 48 bit addresses switch to 32 bit allocation
	 * before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);
	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
......
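The comment in this hunk spells out the reason for the mask juggling: the hardware requires all vring addresses to share the same upper 16 bits of the 48-bit address space, so when the 48-bit mask is active the driver drops to 32 bits around the ring allocation (forcing those upper bits to zero for every ring) and restores the wide mask afterwards. A minimal sketch of that narrow-allocate-restore pattern follows; wil_alloc_ring_mem is a hypothetical wrapper for illustration, not a function in the driver.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical wrapper illustrating the pattern used above for ring memory:
 * when 48-bit addressing is in use, temporarily narrow the mask to 32 bits
 * so the ring lands in the low 4 GB, allocate, then restore the 48-bit mask
 * for ordinary data buffers. Return values of dma_set_mask_and_coherent()
 * are ignored here for the same reason given in the driver comment.
 */
static void *wil_alloc_ring_mem(struct device *dev, size_t sz,
				dma_addr_t *pa, bool use_extended_dma_addr)
{
	void *va;

	if (use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	va = dma_alloc_coherent(dev, sz, pa, GFP_KERNEL);

	if (use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));

	return va;
}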
@@ -123,15 +123,32 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
		vring->va = NULL;
		return -ENOMEM;
	}
	/* vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
	 * if we are using 48 bit addresses switch to 32 bit allocation
	 * before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	if (wil->use_extended_dma_addr)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus
	 * we can use any
......
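The txrx.c hunk repeats the same constraint for the TX/RX vrings. One way to see why a 32-bit allocation satisfies it: the upper 16 bits of a 48-bit bus address are bits 47..32, and any address below 4 GB has them all zero, so every ring allocated under the 32-bit mask trivially shares them. The check below is a hypothetical illustration of that rule, not driver code.

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical sanity check for the HW rule described in the comment above:
 * two vring bus addresses are compatible if they share bits 47..32 of the
 * 48-bit address. Allocations made under a 32-bit mask have those bits zero.
 */
static bool wil_ring_addrs_compatible(dma_addr_t a, dma_addr_t b)
{
	const u64 upper16 = GENMASK_ULL(47, 32);

	return ((u64)a & upper16) == ((u64)b & upper16);
}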
@@ -657,6 +657,7 @@ struct wil6210_priv {
	u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
	struct wil_sta_info sta[WIL6210_MAX_CID];
	int bcast_vring;
	bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
	/* scan */
	struct cfg80211_scan_request *scan_request;
......