Commit 3dc2c13b authored by Lazar Alexei, committed by Kalle Valo

wil6210: support 40bit DMA addresses

Add the option to support 40-bit DMA addresses, since some platforms
may not support 48-bit addresses but do support 40-bit ones.
Signed-off-by: Lazar Alexei <qca_ailizaro@qca.qualcomm.com>
Signed-off-by: Maya Erez <qca_merez@qca.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent a5dc6883
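For readers who want the negotiation logic at a glance before reading the diff: the fallback this patch introduces boils down to trying DMA address widths from widest to narrowest and keeping the first one the platform accepts. The following is a minimal standalone sketch of that pattern, not the driver code itself; the helper name wil_pick_dma_addr_size is hypothetical.

	#include <linux/dma-mapping.h>
	#include <linux/kernel.h>

	/* Try DMA address widths in descending order and return the first
	 * one the platform accepts, or a negative errno if none works.
	 */
	static int wil_pick_dma_addr_size(struct device *dev)
	{
		static const int sizes[] = {48, 40, 32}; /* keep descending order */
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(sizes[i])))
				continue; /* platform rejected this width, try narrower */
			return sizes[i]; /* widest width the platform supports */
		}

		return -EIO; /* not even 32-bit DMA is available */
	}

In the actual patch below this is open-coded in wil_pcie_probe() and the chosen width is stored in wil->dma_addr_size.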
@@ -204,6 +204,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		.fw_recovery = wil_platform_rop_fw_recovery,
 	};
 	u32 bar_size = pci_resource_len(pdev, 0);
+	int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
+	int i;
 
 	/* check HW */
 	dev_info(&pdev->dev, WIL_NAME
@@ -239,21 +241,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	/* rollback to err_plat */
 
-	/* device supports 48bit addresses */
-	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
-	if (rc) {
-		dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
-		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-		if (rc) {
-			dev_err(dev,
-				"dma_set_mask_and_coherent(32) failed: %d\n",
-				rc);
-			goto err_plat;
-		}
-	} else {
-		wil->use_extended_dma_addr = 1;
+	/* device supports >32bit addresses */
+	for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
+		rc = dma_set_mask_and_coherent(dev,
+					       DMA_BIT_MASK(dma_addr_size[i]));
+		if (rc) {
+			dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
+				dma_addr_size[i], rc);
+			continue;
+		}
+		dev_info(dev, "using dma mask %d", dma_addr_size[i]);
+		wil->dma_addr_size = dma_addr_size[i];
+		break;
 	}
 
+	if (wil->dma_addr_size == 0)
+		goto err_plat;
+
 	rc = pci_enable_device(pdev);
 	if (rc && pdev->msi_enabled == 0) {
 		wil_err(wil,
...
@@ -111,14 +111,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
 	 *
 	 * HW has limitation that all vrings addresses must share the same
 	 * upper 16 msb bits part of 48 bits address. To workaround that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32 bit addresses switch to 32 bit
+	 * allocation before allocating vring memory.
 	 *
 	 * There's no check for the return value of dma_set_mask_and_coherent,
 	 * since we assume if we were able to set the mask during
 	 * initialization in this system it will not fail if we set it again
 	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	pmc->pring_va = dma_alloc_coherent(dev,
@@ -126,8 +126,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
 			&pmc->pring_pa,
 			GFP_KERNEL);
 
-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
 
 	wil_dbg_misc(wil,
 		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
...
@@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 	 *
 	 * HW has limitation that all vrings addresses must share the same
 	 * upper 16 msb bits part of 48 bits address. To workaround that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32 bit addresses switch to 32 bit
+	 * allocation before allocating vring memory.
 	 *
 	 * There's no check for the return value of dma_set_mask_and_coherent,
 	 * since we assume if we were able to set the mask during
 	 * initialization in this system it will not fail if we set it again
 	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
 	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
 		return -ENOMEM;
 	}
 
-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));
 
 	/* initially, all descriptors are SW owned
 	 * For Tx and Rx, ownership bit is at the same location, thus
...
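The comment repeated in wil_pmc_alloc and wil_vring_alloc above describes the same workaround: all descriptor rings must share the upper address bits, so ring memory is allocated under a temporary 32-bit mask and the wider mask negotiated at probe time is restored afterwards. As a rough self-contained illustration of that pattern (wil_alloc_ring_coherent is a hypothetical helper, not driver code):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	/* Allocate ring memory below 4 GiB, then restore the wider DMA mask
	 * (dma_addr_size as negotiated at probe time) for ordinary buffers.
	 */
	static void *wil_alloc_ring_coherent(struct device *dev, size_t sz,
					     dma_addr_t *pa, int dma_addr_size)
	{
		void *va;

		/* temporarily restrict DMA so the ring lands in the low 32 bits */
		if (dma_addr_size > 32)
			dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

		va = dma_alloc_coherent(dev, sz, pa, GFP_KERNEL);

		/* restore the wider mask chosen during initialization */
		if (dma_addr_size > 32)
			dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_addr_size));

		return va;
	}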
@@ -704,7 +704,7 @@ struct wil6210_priv {
 	struct wil_sta_info sta[WIL6210_MAX_CID];
 	int bcast_vring;
 	u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
-	bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
+	u32 dma_addr_size; /* indicates dma addr size */
 	/* scan */
 	struct cfg80211_scan_request *scan_request;
...