Commit 96c93589 authored by Gidon Studinski, committed by Kalle Valo

wil6210: initialize TX and RX enhanced DMA rings

Enhanced DMA design includes the following rings:
- Single RX descriptor ring is used for all VIFs
- Multiple RX status rings are supported to allow RSS
- TX descriptor ring is allocated per connection
- A single TX status ring is used for all TX descriptor rings

This patch initializes and frees the above descriptor and
status rings.
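
The legacy and enhanced DMA flows are kept side by side by routing TX/RX
operations through a per-device ops table, selected once the chip is
identified. A condensed sketch of that dispatch, abridged from the
wil_txrx_ops structure and wil_init_txrx_ops() added by this patch (only a
subset of the callbacks is shown):

/* Condensed from the structure added to wil6210.h */
struct wil_txrx_ops {
	void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
	int  (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
	void (*rx_fini)(struct wil6210_priv *wil);
	int  (*tx_init)(struct wil6210_priv *wil);
	void (*tx_fini)(struct wil6210_priv *wil);
};

/* Called from wil_set_capabilities() once the chip is identified */
void wil_init_txrx_ops(struct wil6210_priv *wil)
{
	if (wil->use_enhanced_dma_hw)
		wil_init_txrx_ops_edma(wil);       /* enhanced DMA (Talyn) */
	else
		wil_init_txrx_ops_legacy_dma(wil); /* legacy vring DMA */
}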

RX SKBs are handled by a new RX buffers manager entity, which tracks
RX buffers, each pointing to an allocated SKB.
During RX completion processing, the driver extracts a buffer ID,
which is used as an index into the buffers array.
After the SKB is freed, the buffer is moved from the 'active' list
to the 'free' list, indicating it can be used for another
descriptor. During RX refill, SKBs are allocated and attached to
'free' buffers. These buffers are then attached to new descriptors
and moved to the 'active' list.
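
The bookkeeping described above can be pictured with the following hedged
sketch; the field and function names are illustrative only (the real
implementation lives in txrx_edma.c, collapsed in this diff, behind the
wil_rx_buff_mgmt member added to struct wil6210_priv):

/* Illustrative sketch of the RX buffers manager; names are hypothetical
 * except wil_rx_buff_mgmt.
 */
struct wil_rx_buff {
	struct sk_buff *skb;   /* attached on refill, NULL while free */
	struct list_head list; /* linked on either 'free' or 'active' */
	int id;                /* buffer ID written into the RX descriptor */
};

struct wil_rx_buff_mgmt {
	struct wil_rx_buff *buff_arr; /* rx_buff_id_count entries */
	struct list_head active;      /* buffers currently owned by HW */
	struct list_head free;        /* buffers available for refill */
};

/* RX completion: the buffer ID from the status message indexes buff_arr */
static struct sk_buff *rx_buff_consume(struct wil_rx_buff_mgmt *m, int buff_id)
{
	struct wil_rx_buff *b = &m->buff_arr[buff_id];
	struct sk_buff *skb = b->skb;

	b->skb = NULL;
	list_move(&b->list, &m->free); /* 'active' -> 'free' */
	return skb;
}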

New debugfs entries were added to allow edma configuration:

Run the following command to configure the number of status rings:
echo NUM_OF_STATUS_RINGS > num_rx_status_rings

Run the following command to use the extended RX status message, which
carries additional debug fields from HW:
echo 0 > compressed_rx_status

Run the following command to control the size of the TX status ring:
echo TX_STATUS_RING_ORDER > tx_status_ring_order
The status ring size will be 1 << tx_status_ring_order

Run the following command to control the size of the RX status ring:
echo RX_STATUS_RING_ORDER > rx_status_ring_order
Due to HW constraints, the RX status ring order must be bigger than the RX ring order
The status ring size will be 1 << rx_status_ring_order

Run the following command to change the number of RX buffer IDs:
echo RX_BUFF_ID_COUNT > rx_buff_id_count
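
To make the order arithmetic concrete, the sketch below (illustrative only,
not part of the patch) shows how the debugfs values map to ring sizes and
checks the RX status ring constraint noted above plus the buffer-ID
constraint documented in wil_priv_init() below, using the defaults this
patch sets in txrx_edma.h: WIL_RX_SRING_SIZE_ORDER_DEFAULT (11),
WIL_TX_SRING_SIZE_ORDER_DEFAULT (12) and WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536).

/* Illustrative only: with the defaults added by this patch the RX status
 * ring has 1 << 11 = 2048 entries, the TX status ring 1 << 12 = 4096
 * entries, and 1536 RX buffer IDs are allocated.
 */
static int wil_check_edma_sizes(u32 rx_ring_order, u32 rx_status_ring_order,
				u32 rx_buff_id_count)
{
	u32 rx_ring_size = 1 << rx_ring_order;
	u32 rx_sring_size = 1 << rx_status_ring_order;

	/* HW constraint: RX status ring order must exceed RX ring order */
	if (rx_sring_size <= rx_ring_size)
		return -EINVAL;

	/* buffer ID count must exceed the RX descriptor ring size, since
	 * the HW reorder flow can consume extra buffers before releasing
	 * previous ones
	 */
	if (rx_buff_id_count <= rx_ring_size)
		return -EINVAL;

	return 0;
}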
Signed-off-by: Gidon Studinski <gidons@codeaurora.org>
Signed-off-by: Maya Erez <merez@codeaurora.org>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent 10590c6a
......@@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
wil6210-y += txrx_edma.o
wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += fw.o
......
......@@ -1761,6 +1761,60 @@ static const struct file_operations fops_suspend_stats = {
.open = simple_open,
};
/*---------compressed_rx_status---------*/
static ssize_t wil_compressed_rx_status_write(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct wil6210_priv *wil = s->private;
int compressed_rx_status;
int rc;
rc = kstrtoint_from_user(buf, len, 0, &compressed_rx_status);
if (rc) {
wil_err(wil, "Invalid argument\n");
return rc;
}
if (wil_has_active_ifaces(wil, true, false)) {
wil_err(wil, "cannot change edma config after iface is up\n");
return -EPERM;
}
wil_info(wil, "%sable compressed_rx_status\n",
compressed_rx_status ? "En" : "Dis");
wil->use_compressed_rx_status = compressed_rx_status;
return len;
}
static int
wil_compressed_rx_status_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
seq_printf(s, "%d\n", wil->use_compressed_rx_status);
return 0;
}
static int
wil_compressed_rx_status_seq_open(struct inode *inode, struct file *file)
{
return single_open(file, wil_compressed_rx_status_show,
inode->i_private);
}
static const struct file_operations fops_compressed_rx_status = {
.open = wil_compressed_rx_status_seq_open,
.release = single_release,
.read = seq_read,
.write = wil_compressed_rx_status_write,
.llseek = seq_lseek,
};
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
......@@ -1814,6 +1868,7 @@ static const struct {
{"fw_capabilities", 0444, &fops_fw_capabilities},
{"fw_version", 0444, &fops_fw_version},
{"suspend_stats", 0644, &fops_suspend_stats},
{"compressed_rx_status", 0644, &fops_compressed_rx_status},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
......@@ -1860,6 +1915,10 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(abft_len, 0644, doff_u8),
WIL_FIELD(wakeup_trigger, 0644, doff_u8),
WIL_FIELD(ring_idle_trsh, 0644, doff_u32),
WIL_FIELD(num_rx_status_rings, 0644, doff_u8),
WIL_FIELD(rx_status_ring_order, 0644, doff_u32),
WIL_FIELD(tx_status_ring_order, 0644, doff_u32),
WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
{},
};
......
......@@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
if (ret < 0)
return ret;
wil_configure_interrupt_moderation(wil);
wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
......
......@@ -186,6 +186,27 @@ void wil_unmask_irq(struct wil6210_priv *wil)
wil6210_unmask_irq_misc(wil, true);
}
void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
{
u32 moderation;
wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);
wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);
/* Update RX and TX moderation */
moderation = wil->rx_max_burst_duration |
(WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);
/* Treat special events as regular
* (set bit 0 to 0x1 and clear bits 1-8)
*/
wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
}
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
......
......@@ -21,11 +21,13 @@
#include "wil6210.h"
#include "txrx.h"
#include "txrx_edma.h"
#include "wmi.h"
#include "boot_loader.h"
#define WAIT_FOR_HALP_VOTE_MS 100
#define WAIT_FOR_SCAN_ABORT_MS 1000
#define WIL_DEFAULT_NUM_RX_STATUS_RINGS 1
bool debug_fw; /* = false; */
module_param(debug_fw, bool, 0444);
......@@ -160,6 +162,37 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
}
}
static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
{
struct wil_ring *ring = &wil->ring_tx[id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
lockdep_assert_held(&wil->mutex);
if (!ring->va)
return;
wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->mid = U8_MAX;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
/* napi_synchronize waits for completion of the current NAPI but will
* not prevent the next NAPI run.
* Add a memory barrier to guarantee that txdata->enabled is zeroed
* before napi_synchronize so that the next scheduled NAPI will not
* handle this vring
*/
wmb();
/* make sure NAPI won't touch this vring */
if (test_bit(wil_status_napi_en, wil->status))
napi_synchronize(&wil->napi_tx);
wil->txrx_ops.ring_fini_tx(wil, ring);
}
static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
......@@ -456,15 +489,16 @@ static void wil_fw_error_worker(struct work_struct *work)
static int wil_find_free_ring(struct wil6210_priv *wil)
{
int i;
int min_ring_id = wil_get_min_tx_ring_id(wil);
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
if (!wil->ring_tx[i].va)
return i;
}
return -EINVAL;
}
int wil_tx_init(struct wil6210_vif *vif, int cid)
int wil_ring_init_tx(struct wil6210_vif *vif, int cid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc = -EINVAL, ringid;
......@@ -482,7 +516,8 @@ int wil_tx_init(struct wil6210_vif *vif, int cid)
wil_dbg_wmi(wil, "Configure for connection CID %d MID %d ring %d\n",
cid, vif->mid, ringid);
rc = wil_vring_init_tx(vif, ringid, 1 << tx_ring_order, cid, 0);
rc = wil->txrx_ops.ring_init_tx(vif, ringid, 1 << tx_ring_order,
cid, 0);
if (rc)
wil_err(wil, "init TX for CID %d MID %d vring %d failed\n",
cid, vif->mid, ringid);
......@@ -504,7 +539,7 @@ int wil_bcast_init(struct wil6210_vif *vif)
return ri;
vif->bcast_ring = ri;
rc = wil_vring_init_bcast(vif, ri, 1 << bcast_ring_order);
rc = wil->txrx_ops.ring_init_bcast(vif, ri, 1 << bcast_ring_order);
if (rc)
vif->bcast_ring = -1;
......@@ -594,6 +629,22 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->reply_mid = U8_MAX;
wil->max_vifs = 1;
/* edma configuration can be updated via debugfs before allocation */
wil->num_rx_status_rings = WIL_DEFAULT_NUM_RX_STATUS_RINGS;
wil->use_compressed_rx_status = true;
wil->tx_status_ring_order = WIL_TX_SRING_SIZE_ORDER_DEFAULT;
/* Rx status ring size should be bigger than the number of RX buffers
* in order to prevent backpressure on the status ring, which may
* cause HW freeze.
*/
wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
/* Number of RX buffer IDs should be bigger than the RX descriptor
* ring size as in HW reorder flow, the HW can consume additional
* buffers before releasing the previous ones.
*/
wil->rx_buff_id_count = WIL_RX_BUFF_ARR_SIZE_DEFAULT;
return 0;
out_wmi_wq:
......@@ -1312,7 +1363,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil_target_reset(wil, no_flash);
wil6210_clear_irq(wil);
wil_enable_irq(wil);
wil_rx_fini(wil);
wil->txrx_ops.rx_fini(wil);
wil->txrx_ops.tx_fini(wil);
if (rc) {
if (!no_flash)
wil_bl_crash_info(wil, true);
......@@ -1365,7 +1417,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
if (load_fw) {
wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
/* we just started MAC, wait for FW ready */
......@@ -1380,6 +1431,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
}
wil->txrx_ops.configure_interrupt_moderation(wil);
rc = wil_restore_vifs(wil);
if (rc) {
wil_err(wil, "failed to restore vifs, rc %d\n", rc);
......@@ -1434,8 +1487,12 @@ int __wil_up(struct wil6210_priv *wil)
if (rc)
return rc;
/* Rx VRING. After MAC and beacon */
rc = wil_rx_init(wil, 1 << rx_ring_order);
/* Rx RING. After MAC and beacon */
rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
if (rc)
return rc;
rc = wil->txrx_ops.tx_init(wil);
if (rc)
return rc;
......@@ -1596,3 +1653,11 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_unlock(&wil->halp.lock);
}
void wil_init_txrx_ops(struct wil6210_priv *wil)
{
if (wil->use_enhanced_dma_hw)
wil_init_txrx_ops_edma(wil);
else
wil_init_txrx_ops_legacy_dma(wil);
}
......@@ -102,6 +102,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
set_bit(hw_capa_no_flash, wil->hw_capa);
wil->use_enhanced_dma_hw = true;
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
......@@ -111,6 +112,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
return -EINVAL;
}
wil_init_txrx_ops(wil);
iccm_section = wil_find_fw_mapping("fw_code");
if (!iccm_section) {
wil_err(wil, "fw_code section not found in fw_mapping\n");
......@@ -266,8 +269,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
int i;
int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
int i, start_idx;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
......@@ -302,24 +305,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto if_free;
}
/* rollback to err_plat */
/* device supports >32bit addresses */
for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
rc = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
dma_addr_size[i], rc);
continue;
}
dev_info(dev, "using dma mask %d", dma_addr_size[i]);
wil->dma_addr_size = dma_addr_size[i];
break;
}
if (wil->dma_addr_size == 0)
goto err_plat;
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
......@@ -359,6 +344,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
goto err_iounmap;
}
/* device supports >32bit addresses.
* for legacy DMA start from 48 bit.
*/
start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
rc = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
dma_addr_size[i], rc);
continue;
}
dev_info(dev, "using dma mask %d", dma_addr_size[i]);
wil->dma_addr_size = dma_addr_size[i];
break;
}
if (wil->dma_addr_size == 0)
goto err_iounmap;
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
......
......@@ -202,14 +202,13 @@ static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
}
}
static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring,
int tx)
static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
{
struct device *dev = wil_to_dev(wil);
size_t sz = vring->size * sizeof(vring->va[0]);
lockdep_assert_held(&wil->mutex);
if (tx) {
if (!vring->is_rx) {
int vring_index = vring - wil->ring_tx;
wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
......@@ -226,7 +225,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring,
u16 dmalen;
struct wil_ctx *ctx;
if (tx) {
if (!vring->is_rx) {
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx.legacy;
......@@ -843,7 +842,7 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
}
}
int wil_rx_init(struct wil6210_priv *wil, u16 size)
static int wil_rx_init(struct wil6210_priv *wil, u16 size)
{
struct wil_ring *vring = &wil->ring_rx;
int rc;
......@@ -858,6 +857,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
wil_rx_buf_len_init(wil);
vring->size = size;
vring->is_rx = true;
rc = wil_vring_alloc(wil, vring);
if (rc)
return rc;
......@@ -872,22 +872,22 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
return 0;
err_free:
wil_vring_free(wil, vring, 0);
wil_vring_free(wil, vring);
return rc;
}
void wil_rx_fini(struct wil6210_priv *wil)
static void wil_rx_fini(struct wil6210_priv *wil)
{
struct wil_ring *vring = &wil->ring_rx;
wil_dbg_misc(wil, "rx_fini\n");
if (vring->va)
wil_vring_free(wil, vring, 0);
wil_vring_free(wil, vring);
}
static inline void wil_tx_data_init(struct wil_ring_tx_data *txdata)
void wil_tx_data_init(struct wil_ring_tx_data *txdata)
{
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = 0;
......@@ -903,7 +903,7 @@ static inline void wil_tx_data_init(struct wil_ring_tx_data *txdata)
spin_unlock_bh(&txdata->lock);
}
int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
int cid, int tid)
{
struct wil6210_priv *wil = vif_to_wil(vif);
......@@ -948,6 +948,7 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
}
wil_tx_data_init(txdata);
vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
......@@ -987,7 +988,7 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
txdata->dot1x_open = false;
txdata->enabled = 0;
spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring, 1);
wil_vring_free(wil, vring);
wil->ring2cid_tid[id][0] = WIL6210_MAX_CID;
wil->ring2cid_tid[id][1] = 0;
......@@ -1032,6 +1033,7 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
}
wil_tx_data_init(txdata);
vring->is_rx = false;
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
......@@ -1069,43 +1071,12 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
txdata->enabled = 0;
txdata->dot1x_open = false;
spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring, 1);
wil_vring_free(wil, vring);
out:
return rc;
}
void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
{
struct wil_ring *vring = &wil->ring_tx[id];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
lockdep_assert_held(&wil->mutex);
if (!vring->va)
return;
wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->mid = U8_MAX;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
/* napi_synchronize waits for completion of the current NAPI but will
* not prevent the next NAPI run.
* Add a memory barrier to guarantee that txdata->enabled is zeroed
* before napi_synchronize so that the next scheduled NAPI will not
* handle this vring
*/
wmb();
/* make sure NAPI won't touch this vring */
if (test_bit(wil_status_napi_en, wil->status))
napi_synchronize(&wil->napi_tx);
wil_vring_free(wil, vring, 1);
}
static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct wil6210_vif *vif,
struct sk_buff *skb)
......@@ -1113,12 +1084,13 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
int i;
struct ethhdr *eth = (void *)skb->data;
int cid = wil_find_cid(wil, vif->mid, eth->h_dest);
int min_ring_id = wil_get_min_tx_ring_id(wil);
if (cid < 0)
return NULL;
/* TODO: fix for multiple TID */
for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
if (!wil->ring_tx_data[i].dot1x_open &&
skb->protocol != cpu_to_be16(ETH_P_PAE))
continue;
......@@ -1153,12 +1125,13 @@ static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
int i;
u8 cid;
struct wil_ring_tx_data *txdata;
int min_ring_id = wil_get_min_tx_ring_id(wil);
/* In the STA mode, it is expected to have only 1 VRING
* for the AP we connected to.
* find 1-st vring eligible for this skb and use it.
*/
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
ring = &wil->ring_tx[i];
txdata = &wil->ring_tx_data[i];
if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
......@@ -1234,9 +1207,10 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
struct ethhdr *eth = (void *)skb->data;
char *src = eth->h_source;
struct wil_ring_tx_data *txdata, *txdata2;
int min_ring_id = wil_get_min_tx_ring_id(wil);
/* find 1-st vring eligible for data */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->ring_tx[i];
txdata = &wil->ring_tx_data[i];
if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
......@@ -2201,3 +2175,25 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
return done;
}
static inline int wil_tx_init(struct wil6210_priv *wil)
{
return 0;
}
static inline void wil_tx_fini(struct wil6210_priv *wil) {}
void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
{
wil->txrx_ops.configure_interrupt_moderation =
wil_configure_interrupt_moderation;
/* TX ops */
wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
wil->txrx_ops.ring_fini_tx = wil_vring_free;
wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
wil->txrx_ops.tx_init = wil_tx_init;
wil->txrx_ops.tx_fini = wil_tx_fini;
/* RX ops */
wil->txrx_ops.rx_init = wil_rx_init;
wil->txrx_ops.rx_fini = wil_rx_fini;
}
......@@ -570,6 +570,12 @@ static inline int wil_ring_avail_tx(struct wil_ring *ring)
return ring->size - wil_ring_used_tx(ring) - 1;
}
static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
{
/* In Enhanced DMA ring 0 is reserved for RX */
return wil->use_enhanced_dma_hw ? 1 : 0;
}
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
......@@ -578,5 +584,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn);
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
struct wil_tid_ampdu_rx *r);
void wil_tx_data_init(struct wil_ring_tx_data *txdata);
void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
#endif /* WIL6210_TXRX_H */
This diff is collapsed.
......@@ -19,6 +19,25 @@
#include "wil6210.h"
/* limit status ring size in range [ring size..max ring size] */
#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
/* RX sring order should be bigger than RX ring order */
#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11)
#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)
#define WIL_DEFAULT_RX_STATUS_RING_ID 0
#define WIL_RX_DESC_RING_ID 0
#define WIL_RX_STATUS_IRQ_IDX 0
#define WIL_TX_STATUS_IRQ_IDX 1
#define WIL_EDMA_AGG_WATERMARK (0xffff)
#define WIL_EDMA_AGG_WATERMARK_POS (16)
#define WIL_EDMA_IDLE_TIME_LIMIT_USEC (50)
#define WIL_EDMA_TIME_UNIT_CLK_CYCLES (330) /* fits 1 usec */
/* Enhanced Rx descriptor - MAC part
* [dword 0] : Reserved
* [dword 1] : Reserved
......@@ -216,7 +235,7 @@ struct wil_ring_tx_status {
* bit 22..23 : CB mode:2 - The CB Mode: 0-DMG, 1-EDMG, 2-Wide
* bit 24..27 : Data Offset:4 - The data offset, a code that describe the
* payload shift from the beginning of the buffer:
* 0 - 0 Bytes, 1 - 2 Bytes, 2 - 6 Bytes
* 0 - 0 Bytes, 3 - 2 Bytes
* bit 28 : A-MSDU Present:1 - The QoS (b7) A-MSDU present field
* bit 29 : A-MSDU Type:1 The QoS (b8) A-MSDU Type field
* bit 30 : A-MPDU:1 - Packet is part of aggregated MPDU
......@@ -286,5 +305,38 @@ struct wil_rx_status_extended {
struct wil_rx_status_extension ext;
};
static inline u32 wil_ring_next_head(struct wil_ring *ring)
{
return (ring->swhead + 1) % ring->size;
}
static inline void wil_desc_set_addr_edma(struct wil_ring_dma_addr *addr,
__le16 *addr_high_high,
dma_addr_t pa)
{
addr->addr_low = cpu_to_le32(lower_32_bits(pa));
addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
*addr_high_high = cpu_to_le16((u16)(upper_32_bits(pa) >> 16));
}
static inline
dma_addr_t wil_tx_desc_get_addr_edma(struct wil_ring_tx_enhanced_dma *dma)
{
return le32_to_cpu(dma->addr.addr_low) |
((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
((u64)le16_to_cpu(dma->addr_high_high) << 48);
}
static inline
dma_addr_t wil_rx_desc_get_addr_edma(struct wil_ring_rx_enhanced_dma *dma)
{
return le32_to_cpu(dma->addr.addr_low) |
((u64)le16_to_cpu(dma->addr.addr_high) << 32) |
((u64)le16_to_cpu(dma->addr_high_high) << 48);
}
void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil);
void wil_init_txrx_ops_edma(struct wil6210_priv *wil);
#endif /* WIL6210_TXRX_EDMA_H */
......@@ -37,6 +37,9 @@ extern bool rx_large_buf;
extern bool debug_fw;
extern bool disable_ap_sme;
struct wil6210_priv;
struct wil6210_vif;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME_DEFAULT "wil6210.fw"
......@@ -307,6 +310,18 @@ struct RGF_ICR {
#define RGF_CAF_PLL_LOCK_STATUS (0x88afec)
#define BIT_CAF_OSC_DIG_XTAL_STABLE BIT(0)
/* eDMA */
#define RGF_INT_COUNT_ON_SPECIAL_EVT (0x8b62d8)
#define RGF_INT_CTRL_INT_GEN_CFG_0 (0x8bc000)
#define RGF_INT_CTRL_INT_GEN_CFG_1 (0x8bc004)
#define RGF_INT_GEN_TIME_UNIT_LIMIT (0x8bc0c8)
#define RGF_INT_GEN_CTRL (0x8bc0ec)
#define BIT_CONTROL_0 BIT(0)
#define RGF_INT_GEN_IDLE_TIME_LIMIT (0x8bc134)
#define USER_EXT_USER_PMU_3 (0x88d00c)
#define BIT_PMU_DEVICE_RDY BIT(0)
......@@ -512,6 +527,24 @@ struct wil_status_ring {
struct wil_ring_rx_data rx_data;
};
/**
* struct tx_rx_ops - different TX/RX ops for legacy and enhanced
* DMA flow
*/
struct wil_txrx_ops {
void (*configure_interrupt_moderation)(struct wil6210_priv *wil);
/* TX ops */
int (*ring_init_tx)(struct wil6210_vif *vif, int ring_id,
int size, int cid, int tid);
void (*ring_fini_tx)(struct wil6210_priv *wil, struct wil_ring *ring);
int (*ring_init_bcast)(struct wil6210_vif *vif, int id, int size);
int (*tx_init)(struct wil6210_priv *wil);
void (*tx_fini)(struct wil6210_priv *wil);
/* RX ops */
int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
void (*rx_fini)(struct wil6210_priv *wil);
};
/**
* Additional data for Tx ring
*/
......@@ -848,12 +881,15 @@ struct wil6210_priv {
struct wil_ring ring_tx[WIL6210_MAX_TX_RINGS];
struct wil_ring_tx_data ring_tx_data[WIL6210_MAX_TX_RINGS];
struct wil_status_ring srings[WIL6210_MAX_STATUS_RINGS];
int num_rx_status_rings;
u8 num_rx_status_rings;
int tx_sring_idx;
u8 ring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
struct wil_sta_info sta[WIL6210_MAX_CID];
u32 ring_idle_trsh; /* HW fetches up to 16 descriptors at once */
u32 dma_addr_size; /* indicates dma addr size */
struct wil_rx_buff_mgmt rx_buff_mgmt;
bool use_enhanced_dma_hw;
struct wil_txrx_ops txrx_ops;
struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
/* statistics */
......@@ -896,6 +932,12 @@ struct wil6210_priv {
u32 rgf_fw_assert_code_addr;
u32 rgf_ucode_assert_code_addr;
u32 iccm_base;
/* relevant only for eDMA */
bool use_compressed_rx_status;
u32 rx_status_ring_order;
u32 tx_status_ring_order;
u32 rx_buff_id_count;
};
#define wil_to_wiphy(i) (i->wiphy)
......@@ -1168,14 +1210,10 @@ void wil_probe_client_flush(struct wil6210_vif *vif);
void wil_probe_client_worker(struct work_struct *work);
void wil_disconnect_worker(struct work_struct *work);
int wil_rx_init(struct wil6210_priv *wil, u16 size);
void wil_rx_fini(struct wil6210_priv *wil);
void wil_init_txrx_ops(struct wil6210_priv *wil);
/* TX API */
int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
int cid, int tid);
void wil_ring_fini_tx(struct wil6210_priv *wil, int id);
int wil_tx_init(struct wil6210_vif *vif, int cid);
int wil_ring_init_tx(struct wil6210_vif *vif, int cid);
int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size);
int wil_bcast_init(struct wil6210_vif *vif);
void wil_bcast_fini(struct wil6210_vif *vif);
......@@ -1227,4 +1265,14 @@ int wmi_start_sched_scan(struct wil6210_priv *wil,
int wmi_stop_sched_scan(struct wil6210_priv *wil);
int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len);
/* WMI for enhanced DMA */
int wil_wmi_tx_sring_cfg(struct wil6210_priv *wil, int ring_id);
int wil_wmi_cfg_def_rx_offload(struct wil6210_priv *wil,
u16 max_rx_pl_per_desc);
int wil_wmi_rx_sring_add(struct wil6210_priv *wil, u16 ring_id);
int wil_wmi_rx_desc_ring_add(struct wil6210_priv *wil, int status_ring_id);
int wil_wmi_tx_desc_ring_add(struct wil6210_vif *vif, int ring_id, int cid,
int tid);
int wil_wmi_bcast_desc_ring_add(struct wil6210_vif *vif, int ring_id);
#endif /* __WIL6210_H__ */
This diff is collapsed.
......@@ -148,8 +148,8 @@ enum wmi_command_id {
WMI_CFG_RX_CHAIN_CMDID = 0x820,
WMI_VRING_CFG_CMDID = 0x821,
WMI_BCAST_VRING_CFG_CMDID = 0x822,
WMI_VRING_BA_EN_CMDID = 0x823,
WMI_VRING_BA_DIS_CMDID = 0x824,
WMI_RING_BA_EN_CMDID = 0x823,
WMI_RING_BA_DIS_CMDID = 0x824,
WMI_RCP_ADDBA_RESP_CMDID = 0x825,
WMI_RCP_DELBA_CMDID = 0x826,
WMI_SET_SSID_CMDID = 0x827,
......@@ -163,6 +163,7 @@ enum wmi_command_id {
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
WMI_RCP_ADDBA_RESP_EDMA_CMDID = 0x83B,
WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
......@@ -235,6 +236,12 @@ enum wmi_command_id {
WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7,
WMI_BF_CONTROL_CMDID = 0x9AA,
WMI_TX_STATUS_RING_ADD_CMDID = 0x9C0,
WMI_RX_STATUS_RING_ADD_CMDID = 0x9C1,
WMI_TX_DESC_RING_ADD_CMDID = 0x9C2,
WMI_RX_DESC_RING_ADD_CMDID = 0x9C3,
WMI_BCAST_DESC_RING_ADD_CMDID = 0x9C4,
WMI_CFG_DEF_RX_OFFLOAD_CMDID = 0x9C5,
WMI_SCHEDULING_SCHEME_CMDID = 0xA01,
WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02,
WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03,
......@@ -781,18 +788,90 @@ struct wmi_lo_power_calib_from_otp_event {
u8 reserved[3];
} __packed;
/* WMI_VRING_BA_EN_CMDID */
struct wmi_vring_ba_en_cmd {
u8 ringid;
struct wmi_edma_ring_cfg {
__le64 ring_mem_base;
/* size in number of items */
__le16 ring_size;
u8 ring_id;
u8 reserved;
} __packed;
enum wmi_rx_msg_type {
WMI_RX_MSG_TYPE_COMPRESSED = 0x00,
WMI_RX_MSG_TYPE_EXTENDED = 0x01,
};
struct wmi_tx_status_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
u8 irq_index;
u8 reserved[3];
} __packed;
struct wmi_rx_status_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
u8 irq_index;
/* wmi_rx_msg_type */
u8 rx_msg_type;
u8 reserved[2];
} __packed;
struct wmi_cfg_def_rx_offload_cmd {
__le16 max_msdu_size;
__le16 max_rx_pl_per_desc;
u8 decap_trans_type;
u8 l2_802_3_offload_ctrl;
u8 l2_nwifi_offload_ctrl;
u8 vlan_id;
u8 nwifi_ds_trans_type;
u8 l3_l4_ctrl;
u8 reserved[6];
} __packed;
struct wmi_tx_desc_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
__le16 max_msdu_size;
/* Correlated status ring (0-63) */
u8 status_ring_id;
u8 cid;
u8 tid;
u8 encap_trans_type;
u8 mac_ctrl;
u8 to_resolution;
u8 agg_max_wsize;
u8 reserved[3];
struct wmi_vring_cfg_schd schd_params;
} __packed;
struct wmi_rx_desc_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
u8 irq_index;
/* 0-63 status rings */
u8 status_ring_id;
u8 reserved[2];
__le64 sw_tail_host_addr;
} __packed;
struct wmi_bcast_desc_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
__le16 max_msdu_size;
/* Correlated status ring (0-63) */
u8 status_ring_id;
u8 encap_trans_type;
u8 reserved[4];
} __packed;
/* WMI_RING_BA_EN_CMDID */
struct wmi_ring_ba_en_cmd {
u8 ring_id;
u8 agg_max_wsize;
__le16 ba_timeout;
u8 amsdu;
u8 reserved[3];
} __packed;
/* WMI_VRING_BA_DIS_CMDID */
struct wmi_vring_ba_dis_cmd {
u8 ringid;
/* WMI_RING_BA_DIS_CMDID */
struct wmi_ring_ba_dis_cmd {
u8 ring_id;
u8 reserved;
__le16 reason;
} __packed;
......@@ -950,6 +1029,21 @@ struct wmi_rcp_addba_resp_cmd {
u8 reserved[2];
} __packed;
/* WMI_RCP_ADDBA_RESP_EDMA_CMDID */
struct wmi_rcp_addba_resp_edma_cmd {
u8 cid;
u8 tid;
u8 dialog_token;
u8 reserved;
__le16 status_code;
/* ieee80211_ba_parameterset field to send */
__le16 ba_param_set;
__le16 ba_timeout;
u8 status_ring_id;
/* wmi_cfg_rx_chain_cmd_reorder_type */
u8 reorder_type;
} __packed;
/* WMI_RCP_DELBA_CMDID */
struct wmi_rcp_delba_cmd {
/* Used for cid less than 8. For higher cid set
......@@ -1535,7 +1629,7 @@ enum wmi_event_id {
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
WMI_VRING_EN_EVENTID = 0x1865,
WMI_RING_EN_EVENTID = 0x1865,
WMI_GET_RF_STATUS_EVENTID = 0x1866,
WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868,
......@@ -1587,6 +1681,11 @@ enum wmi_event_id {
WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6,
WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7,
WMI_BF_CONTROL_EVENTID = 0x19AA,
WMI_TX_STATUS_RING_CFG_DONE_EVENTID = 0x19C0,
WMI_RX_STATUS_RING_CFG_DONE_EVENTID = 0x19C1,
WMI_TX_DESC_RING_CFG_DONE_EVENTID = 0x19C2,
WMI_RX_DESC_RING_CFG_DONE_EVENTID = 0x19C3,
WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID = 0x19C5,
WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01,
WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02,
WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03,
......@@ -1997,6 +2096,49 @@ struct wmi_rcp_addba_resp_sent_event {
u8 reserved2[2];
} __packed;
/* WMI_TX_STATUS_RING_CFG_DONE_EVENTID */
struct wmi_tx_status_ring_cfg_done_event {
u8 ring_id;
/* wmi_fw_status */
u8 status;
u8 reserved[2];
__le32 ring_tail_ptr;
} __packed;
/* WMI_RX_STATUS_RING_CFG_DONE_EVENTID */
struct wmi_rx_status_ring_cfg_done_event {
u8 ring_id;
/* wmi_fw_status */
u8 status;
u8 reserved[2];
__le32 ring_tail_ptr;
} __packed;
/* WMI_CFG_DEF_RX_OFFLOAD_DONE_EVENTID */
struct wmi_cfg_def_rx_offload_done_event {
/* wmi_fw_status */
u8 status;
u8 reserved[3];
} __packed;
/* WMI_TX_DESC_RING_CFG_DONE_EVENTID */
struct wmi_tx_desc_ring_cfg_done_event {
u8 ring_id;
/* wmi_fw_status */
u8 status;
u8 reserved[2];
__le32 ring_tail_ptr;
} __packed;
/* WMI_RX_DESC_RING_CFG_DONE_EVENTID */
struct wmi_rx_desc_ring_cfg_done_event {
u8 ring_id;
/* wmi_fw_status */
u8 status;
u8 reserved[2];
__le32 ring_tail_ptr;
} __packed;
/* WMI_RCP_ADDBA_REQ_EVENTID */
struct wmi_rcp_addba_req_event {
/* Used for cid less than 8. For higher cid set
......@@ -2047,9 +2189,9 @@ struct wmi_data_port_open_event {
u8 reserved[3];
} __packed;
/* WMI_VRING_EN_EVENTID */
struct wmi_vring_en_event {
u8 vring_index;
/* WMI_RING_EN_EVENTID */
struct wmi_ring_en_event {
u8 ring_index;
u8 reserved[3];
} __packed;
......