Commit bf9b608e authored by Kalle Valo

Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.19. Major changes:

wcn36xx

* fix WEP in client mode

wil6210

* add support for Talyn-MB (Talyn ver 2.0) device

* add support for enhanced DMA firmware feature
parents aea5f654 f0eea277
......@@ -274,7 +274,7 @@ ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
......@@ -655,7 +655,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
HTC_HOST_MAX_MSG_PER_BUNDLE);
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Extended ready message. RX bundle size: %d\n",
htc->max_msgs_per_htc_bundle);
......
......@@ -50,7 +50,8 @@ struct ath10k;
* 4-byte aligned.
*/
#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
enum ath10k_htc_tx_flags {
ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
......@@ -58,6 +59,7 @@ enum ath10k_htc_tx_flags {
};
enum ath10k_htc_rx_flags {
ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
};
......
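As an aside (not part of the patch): ATH10K_HTC_FLAG_BUNDLE_MASK covers the upper nibble of the HTC flags byte, and the SDIO code further down pulls the bundled-message count out of it with FIELD_GET(). A minimal standalone sketch of that extraction:

#include <linux/bitfield.h>
#include <linux/types.h>

#define ATH10K_HTC_FLAG_BUNDLE_MASK 0xF0

/* Sketch: extract the 4-bit bundle count (bits 7..4) from an HTC flags byte */
static u8 htc_bundle_count(u8 flags)
{
        return FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, flags);
}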
......@@ -268,11 +268,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
spin_unlock_bh(&htt->rx_ring.lock);
return ret;
}
......@@ -284,7 +285,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
ath10k_htt_get_rx_ring_size(htt),
......@@ -1089,7 +1092,7 @@ static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
......@@ -2810,7 +2813,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
......@@ -2874,7 +2877,7 @@ static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
if (skb_queue_empty(&ar->htt.rx_msdus_q))
break;
skb = __skb_dequeue(&ar->htt.rx_msdus_q);
skb = skb_dequeue(&ar->htt.rx_msdus_q);
if (!skb)
break;
ath10k_process_rx(ar, skb);
......@@ -2905,7 +2908,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
goto exit;
}
while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
......
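For context (an aside, not part of the diff): the double-underscore skb queue helpers do no locking of their own and require the caller to hold the queue lock, while skb_queue_tail()/skb_dequeue() take the queue's internal lock (with irqsave) themselves. A rough standalone sketch of the two patterns:

#include <linux/skbuff.h>

/* Sketch only, not driver code. */
static void enqueue_with_external_lock(struct sk_buff_head *q, struct sk_buff *skb)
{
        unsigned long flags;

        /* __skb_queue_tail() assumes the caller already holds q->lock */
        spin_lock_irqsave(&q->lock, flags);
        __skb_queue_tail(q, skb);
        spin_unlock_irqrestore(&q->lock, flags);
}

static void enqueue_with_internal_lock(struct sk_buff_head *q, struct sk_buff *skb)
{
        /* skb_queue_tail() takes q->lock internally, so no caller locking */
        skb_queue_tail(q, skb);
}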
......@@ -1056,7 +1056,7 @@ static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
return HTT_DATA_TX_EXT_TID_MGMT;
else if (cb->flags & ATH10K_SKB_F_QOS)
return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
else
return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
......
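For illustration only (not from the patch): IEEE80211_QOS_CTL_TID_MASK is 0x000f, and masking, unlike the old modulo, never folds a priority equal to the mask value back to 0 (15 % 15 == 0 but 15 & 15 == 15), which is why the bitwise AND is the correct way to extract the TID. A standalone userspace sketch:

#include <stdio.h>

#define QOS_CTL_TID_MASK 0x000f /* mirrors IEEE80211_QOS_CTL_TID_MASK */

int main(void)
{
        unsigned int prio;

        for (prio = 0; prio <= 15; prio++)
                printf("priority %2u -> modulo %2u, mask %2u\n",
                       prio, prio % QOS_CTL_TID_MASK, prio & QOS_CTL_TID_MASK);
        return 0;
}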
......@@ -4026,7 +4026,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
drv_priv);
/* Prevent aggressive sta/tid taking over tx queue */
max = 16;
max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
ret = 0;
while (ath10k_mac_tx_can_push(hw, txq) && max--) {
ret = ath10k_mac_tx_push_txq(hw, txq);
......@@ -4047,6 +4047,7 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
rcu_read_unlock();
spin_unlock_bh(&ar->txqs_lock);
}
EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
/************/
/* Scanning */
......@@ -4287,7 +4288,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
struct ieee80211_txq *f_txq;
struct ath10k_txq *f_artxq;
int ret = 0;
int max = 16;
int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
......
......@@ -30,6 +30,7 @@
#include "debug.h"
#include "hif.h"
#include "htc.h"
#include "mac.h"
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
......@@ -396,6 +397,7 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
int ret;
payload_len = le16_to_cpu(htc_hdr->len);
skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
if (trailer_present) {
trailer = skb->data + sizeof(*htc_hdr) +
......@@ -434,12 +436,14 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
enum ath10k_htc_ep_id id;
int ret, i, *n_lookahead_local;
u32 *lookaheads_local;
int lookahead_idx = 0;
for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
lookaheads_local = lookaheads;
n_lookahead_local = n_lookahead;
id = ((struct ath10k_htc_hdr *)&lookaheads[i])->eid;
id = ((struct ath10k_htc_hdr *)
&lookaheads[lookahead_idx++])->eid;
if (id >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
......@@ -462,6 +466,7 @@ static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
/* Only read lookahead's from RX trailers
* for the last packet in a bundle.
*/
lookahead_idx--;
lookaheads_local = NULL;
n_lookahead_local = NULL;
}
......@@ -505,11 +510,11 @@ static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
*bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_BUNDLE) {
if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
ath10k_warn(ar,
"HTC bundle length %u exceeds maximum %u\n",
le16_to_cpu(htc_hdr->len),
HTC_HOST_MAX_MSG_PER_BUNDLE);
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
return -ENOMEM;
}
......@@ -600,6 +605,9 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
* ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
* packet skb's have been allocated in the previous step.
*/
if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
act_len,
full_len,
......@@ -1342,6 +1350,8 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func)
break;
} while (time_before(jiffies, timeout) && !done);
ath10k_mac_tx_push_pending(ar);
sdio_claim_host(ar_sdio->func);
if (ret && ret != -ECANCELED)
......
......@@ -96,14 +96,14 @@
* way:
*
* Let's assume that each packet in a bundle of the maximum bundle size
* (HTC_HOST_MAX_MSG_PER_BUNDLE) has the HTC header bundle count set
* to the maximum value (HTC_HOST_MAX_MSG_PER_BUNDLE).
* (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
* to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
*
* in this case the driver must allocate
* (HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE) skb's.
* (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skb's.
*/
#define ATH10K_SDIO_MAX_RX_MSGS \
(HTC_HOST_MAX_MSG_PER_BUNDLE * HTC_HOST_MAX_MSG_PER_BUNDLE)
(HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
......
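In concrete numbers (not part of the patch): with HTC_HOST_MAX_MSG_PER_RX_BUNDLE defined as 8, ATH10K_SDIO_MAX_RX_MSGS above evaluates to 8 * 8 = 64 receive packet slots.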
......@@ -1076,6 +1076,8 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = reg->eeprom_rd;
arg->low_5ghz_chan = reg->low_5ghz_chan;
arg->high_5ghz_chan = reg->high_5ghz_chan;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = svc_bmap;
arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
......@@ -1614,10 +1616,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
ie_len = roundup(arg->ie_len, 4);
len = (sizeof(*tlv) + sizeof(*cmd)) +
(arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
(arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
(arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
(arg->ie_len ? sizeof(*tlv) + ie_len : 0);
sizeof(*tlv) + chan_len +
sizeof(*tlv) + ssid_len +
sizeof(*tlv) + bssid_len +
sizeof(*tlv) + ie_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
......
......@@ -342,7 +342,7 @@ struct ath_chanctx {
struct ath_beacon_config beacon;
struct ath9k_hw_cal_data caldata;
struct timespec tsf_ts;
struct timespec64 tsf_ts;
u64 tsf_val;
u32 last_beacon;
......@@ -1021,7 +1021,7 @@ struct ath_softc {
struct ath_offchannel offchannel;
struct ath_chanctx *next_chan;
struct completion go_beacon;
struct timespec last_event_time;
struct timespec64 last_event_time;
#endif
unsigned long driver_data;
......
......@@ -233,9 +233,9 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
static u32 chanctx_event_delta(struct ath_softc *sc)
{
u64 ms;
struct timespec ts, *old;
struct timespec64 ts, *old;
getrawmonotonic(&ts);
ktime_get_raw_ts64(&ts);
old = &sc->last_event_time;
ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
......@@ -334,7 +334,7 @@ ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
{
struct ath_chanctx *prev, *cur;
struct timespec ts;
struct timespec64 ts;
u32 cur_tsf, prev_tsf, beacon_int;
s32 offset;
......@@ -346,7 +346,7 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
if (!prev->switch_after_beacon)
return;
getrawmonotonic(&ts);
ktime_get_raw_ts64(&ts);
cur_tsf = (u32) cur->tsf_val +
ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
......@@ -1230,7 +1230,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_chanctx *old_ctx;
struct timespec ts;
struct timespec64 ts;
bool measure_time = false;
bool send_ps = false;
bool queues_stopped = false;
......@@ -1260,7 +1260,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_unlock_bh(&sc->chan_lock);
if (sc->next_chan == &sc->offchannel.chan) {
getrawmonotonic(&ts);
ktime_get_raw_ts64(&ts);
measure_time = true;
}
......@@ -1277,7 +1277,7 @@ void ath_chanctx_set_next(struct ath_softc *sc, bool force)
spin_lock_bh(&sc->chan_lock);
if (sc->cur_chan != &sc->offchannel.chan) {
getrawmonotonic(&sc->cur_chan->tsf_ts);
ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
}
}
......
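For background (an aside): ktime_get_raw_ts64() is the timespec64 counterpart of the old getrawmonotonic() interface it replaces here; both read the raw monotonic clock, but the 64-bit variant stays valid past 2038. A minimal sketch of the pattern this series converts to:

#include <linux/timekeeping.h>

/* Sketch only: raw monotonic time in milliseconds, using timespec64 */
static u64 raw_monotonic_ms(void)
{
        struct timespec64 ts;

        ktime_get_raw_ts64(&ts);        /* replaces getrawmonotonic(&ts) */
        return ts.tv_sec * MSEC_PER_SEC + ts.tv_nsec / NSEC_PER_MSEC;
}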
......@@ -138,6 +138,7 @@ static void hif_usb_mgmt_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
struct hif_device_usb *hif_dev;
unsigned long flags;
bool txok = true;
if (!cmd || !cmd->skb || !cmd->hif_dev)
......@@ -158,14 +159,14 @@ static void hif_usb_mgmt_cb(struct urb *urb)
* If the URBs are being flushed, no need to complete
* this packet.
*/
spin_lock(&hif_dev->tx.tx_lock);
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
spin_unlock(&hif_dev->tx.tx_lock);
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
dev_kfree_skb_any(cmd->skb);
kfree(cmd);
return;
}
spin_unlock(&hif_dev->tx.tx_lock);
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
break;
default:
......
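For background (an aside, not from the patch): plain spin_lock() leaves local interrupts enabled, so if the same lock can also be taken from interrupt context the handler may deadlock against the interrupted lock holder; spin_lock_irqsave() disables local interrupts for the critical section and restores the previous state afterwards. The pattern, roughly:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Sketch only: safe when demo_lock may also be taken from irq context */
static void demo_touch_shared_state(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... access data shared with the interrupt path ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}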
......@@ -1107,25 +1107,26 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
unsigned long flags;
spin_lock(&priv->rx.rxbuflock);
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
if (!tmp_buf->in_process) {
rxbuf = tmp_buf;
break;
}
}
spin_unlock(&priv->rx.rxbuflock);
spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
if (rxbuf == NULL) {
ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
spin_lock(&priv->rx.rxbuflock);
spin_lock_irqsave(&priv->rx.rxbuflock, flags);
rxbuf->skb = skb;
rxbuf->in_process = true;
spin_unlock(&priv->rx.rxbuflock);
spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
tasklet_schedule(&priv->rx_tasklet);
return;
......
......@@ -1835,13 +1835,13 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
return -EINVAL;
}
u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur)
u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
{
struct timespec ts;
struct timespec64 ts;
s64 usec;
if (!cur) {
getrawmonotonic(&ts);
ktime_get_raw_ts64(&ts);
cur = &ts;
}
......@@ -1859,7 +1859,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
struct timespec tsf_ts;
struct timespec64 tsf_ts;
u32 tsf_offset;
u64 tsf = 0;
int r;
......@@ -1905,7 +1905,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* Save TSF before chip reset, a cold reset clears it */
getrawmonotonic(&tsf_ts);
ktime_get_raw_ts64(&tsf_ts);
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
......
......@@ -1060,7 +1060,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur);
u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
......
......@@ -1865,7 +1865,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
tsf -= le64_to_cpu(avp->tsf_adjust);
getrawmonotonic(&avp->chanctx->tsf_ts);
ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_settsf64(sc->sc_ah, tsf);
avp->chanctx->tsf_val = tsf;
......@@ -1881,7 +1881,7 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
getrawmonotonic(&avp->chanctx->tsf_ts);
ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
if (sc->cur_chan == avp->chanctx)
ath9k_hw_reset_tsf(sc->sc_ah);
avp->chanctx->tsf_val = 0;
......
......@@ -209,6 +209,7 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
{
struct wmi *wmi = priv;
struct wmi_cmd_hdr *hdr;
unsigned long flags;
u16 cmd_id;
if (unlikely(wmi->stopped))
......@@ -218,20 +219,20 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
cmd_id = be16_to_cpu(hdr->command_id);
if (cmd_id & 0x1000) {
spin_lock(&wmi->wmi_lock);
spin_lock_irqsave(&wmi->wmi_lock, flags);
__skb_queue_tail(&wmi->wmi_event_queue, skb);
spin_unlock(&wmi->wmi_lock);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
tasklet_schedule(&wmi->wmi_event_tasklet);
return;
}
/* Check if there has been a timeout. */
spin_lock(&wmi->wmi_lock);
spin_lock_irqsave(&wmi->wmi_lock, flags);
if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
spin_unlock(&wmi->wmi_lock);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
goto free_skb;
}
spin_unlock(&wmi->wmi_lock);
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
/* WMI command response */
ath9k_wmi_rsp_callback(wmi, skb);
......
......@@ -493,7 +493,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
struct wcn36xx_sta *sta_priv = sta ? wcn36xx_sta_to_priv(sta) : NULL;
int ret = 0;
u8 key[WLAN_MAX_KEY_LEN];
......@@ -512,7 +512,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
break;
case WLAN_CIPHER_SUITE_CCMP:
vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
......@@ -567,15 +567,19 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_conf->keyidx,
key_conf->keylen,
key);
if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
(WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
sta_priv->is_data_encrypted = true;
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx,
key_conf->keylen,
key,
get_sta_index(vif, sta_priv));
list_for_each_entry(sta_priv,
&vif_priv->sta_list, list) {
sta_priv->is_data_encrypted = true;
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx,
key_conf->keylen,
key,
get_sta_index(vif, sta_priv));
}
}
}
break;
......@@ -984,6 +988,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
INIT_LIST_HEAD(&vif_priv->sta_list);
list_add(&vif_priv->list, &wcn->vif_list);
wcn36xx_smd_add_sta_self(wcn, vif);
......@@ -1005,6 +1010,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
spin_lock_init(&sta_priv->ampdu_lock);
sta_priv->vif = vif_priv;
list_add(&sta_priv->list, &vif_priv->sta_list);
/*
* For STA mode HW will be configured on BSS_CHANGED_ASSOC because
* at this stage AID is not available yet.
......@@ -1032,6 +1039,7 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
mutex_lock(&wcn->conf_mutex);
list_del(&sta_priv->list);
wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
sta_priv->vif = NULL;
......@@ -1153,8 +1161,6 @@ static const struct ieee80211_ops wcn36xx_ops = {
static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
{
int ret = 0;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
......@@ -1201,7 +1207,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
wiphy_ext_feature_set(wcn->hw->wiphy,
NL80211_EXT_FEATURE_CQM_RSSI_LIST);
return ret;
return 0;
}
static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
......
......@@ -129,6 +129,8 @@ struct wcn36xx_vif {
u8 self_sta_index;
u8 self_dpu_desc_index;
u8 self_ucast_dpu_sign;
struct list_head sta_list;
};
/**
......@@ -154,6 +156,7 @@ struct wcn36xx_vif {
* |______________|_____________|_______________|
*/
struct wcn36xx_sta {
struct list_head list;
struct wcn36xx_vif *vif;
u16 aid;
u16 tid;
......
......@@ -9,6 +9,7 @@ wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
wil6210-y += txrx_edma.o
wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += fw.o
......
......@@ -1726,7 +1726,7 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int authorize;
int cid, i;
struct vring_tx_data *txdata = NULL;
struct wil_ring_tx_data *txdata = NULL;
wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x mid %d\n",
mac, params->sta_flags_mask, params->sta_flags_set,
......@@ -1746,20 +1746,20 @@ static int wil_cfg80211_change_station(struct wiphy *wiphy,
return -ENOLINK;
}
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++)
if (wil->vring2cid_tid[i][0] == cid) {
txdata = &wil->vring_tx_data[i];
for (i = 0; i < ARRAY_SIZE(wil->ring2cid_tid); i++)
if (wil->ring2cid_tid[i][0] == cid) {
txdata = &wil->ring_tx_data[i];
break;
}
if (!txdata) {
wil_err(wil, "vring data not found\n");
wil_err(wil, "ring data not found\n");
return -ENOLINK;
}
authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
txdata->dot1x_open = authorize ? 1 : 0;
wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i,
wil_dbg_misc(wil, "cid %d ring %d authorize %d\n", cid, i,
txdata->dot1x_open);
return 0;
......
......@@ -101,7 +101,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
if (ret < 0)
return ret;
wil_configure_interrupt_moderation(wil);
wil->txrx_ops.configure_interrupt_moderation(wil);
wil_pm_runtime_put(wil);
......
......@@ -120,6 +120,27 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
return done;
}
static int wil6210_netdev_poll_rx_edma(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
napi_rx);
int quota = budget;
int done;
wil_rx_handle_edma(wil, &quota);
done = budget - quota;
if (done < budget) {
napi_complete_done(napi, done);
wil6210_unmask_irq_rx_edma(wil);
wil_dbg_txrx(wil, "NAPI RX complete\n");
}
wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
return done;
}
static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
......@@ -129,11 +150,11 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
/* always process ALL Tx complete, regardless budget - it is fast */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct vring *vring = &wil->vring_tx[i];
struct vring_tx_data *txdata = &wil->vring_tx_data[i];
struct wil_ring *ring = &wil->ring_tx[i];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
struct wil6210_vif *vif;
if (!vring->va || !txdata->enabled ||
if (!ring->va || !txdata->enabled ||
txdata->mid >= wil->max_vifs)
continue;
......@@ -157,6 +178,30 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
return min(tx_done, budget);
}
static int wil6210_netdev_poll_tx_edma(struct napi_struct *napi, int budget)
{
struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
napi_tx);
int tx_done;
/* There is only one status TX ring */
struct wil_status_ring *sring = &wil->srings[wil->tx_sring_idx];
if (!sring->va)
return 0;
tx_done = wil_tx_sring_handler(wil, sring);
if (tx_done < budget) {
napi_complete(napi);
wil6210_unmask_irq_tx_edma(wil);
wil_dbg_txrx(wil, "NAPI TX complete\n");
}
wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
return min(tx_done, budget);
}
static void wil_dev_setup(struct net_device *dev)
{
ether_setup(dev);
......@@ -228,7 +273,7 @@ static void wil_p2p_discovery_timer_fn(struct timer_list *t)
static void wil_vif_init(struct wil6210_vif *vif)
{
vif->bcast_vring = -1;
vif->bcast_ring = -1;
mutex_init(&vif->probe_client_mutex);
......@@ -418,11 +463,21 @@ int wil_if_add(struct wil6210_priv *wil)
}
init_dummy_netdev(&wil->napi_ndev);
netif_napi_add(&wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
WIL6210_NAPI_BUDGET);
netif_tx_napi_add(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
if (wil->use_enhanced_dma_hw) {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx_edma,
WIL6210_NAPI_BUDGET);
netif_tx_napi_add(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma,
WIL6210_NAPI_BUDGET);
} else {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx,
WIL6210_NAPI_BUDGET);
netif_tx_napi_add(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx,
WIL6210_NAPI_BUDGET);
}
wil_update_net_queues_bh(wil, vif, NULL, true);
......
......@@ -85,7 +85,7 @@ int wil_set_capabilities(struct wil6210_priv *wil)
wil->rgf_ucode_assert_code_addr = SPARROW_RGF_UCODE_ASSERT_CODE;
break;
case JTAG_DEV_ID_TALYN:
wil->hw_name = "Talyn";
wil->hw_name = "Talyn-MA";
wil->hw_version = HW_VER_TALYN;
memcpy(fw_mapping, talyn_fw_mapping, sizeof(talyn_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
......@@ -94,6 +94,17 @@ int wil_set_capabilities(struct wil6210_priv *wil)
BIT_NO_FLASH_INDICATION)
set_bit(hw_capa_no_flash, wil->hw_capa);
break;
case JTAG_DEV_ID_TALYN_MB:
wil->hw_name = "Talyn-MB";
wil->hw_version = HW_VER_TALYN_MB;
memcpy(fw_mapping, talyn_mb_fw_mapping,
sizeof(talyn_mb_fw_mapping));
wil->rgf_fw_assert_code_addr = TALYN_RGF_FW_ASSERT_CODE;
wil->rgf_ucode_assert_code_addr = TALYN_RGF_UCODE_ASSERT_CODE;
set_bit(hw_capa_no_flash, wil->hw_capa);
wil->use_enhanced_dma_hw = true;
wil->use_rx_hw_reordering = true;
break;
default:
wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
jtag_id, chip_revision);
......@@ -102,6 +113,8 @@ int wil_set_capabilities(struct wil6210_priv *wil)
return -EINVAL;
}
wil_init_txrx_ops(wil);
iccm_section = wil_find_fw_mapping("fw_code");
if (!iccm_section) {
wil_err(wil, "fw_code section not found in fw_mapping\n");
......@@ -257,8 +270,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
int i;
int dma_addr_size[] = {64, 48, 40, 32}; /* keep descending order */
int i, start_idx;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
......@@ -293,24 +306,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto if_free;
}
/* rollback to err_plat */
/* device supports >32bit addresses */
for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
rc = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
dma_addr_size[i], rc);
continue;
}
dev_info(dev, "using dma mask %d", dma_addr_size[i]);
wil->dma_addr_size = dma_addr_size[i];
break;
}
if (wil->dma_addr_size == 0)
goto err_plat;
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
......@@ -350,6 +345,28 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wil_err(wil, "wil_set_capabilities failed, rc %d\n", rc);
goto err_iounmap;
}
/* device supports >32bit addresses.
* for legacy DMA start from 48 bit.
*/
start_idx = wil->use_enhanced_dma_hw ? 0 : 1;
for (i = start_idx; i < ARRAY_SIZE(dma_addr_size); i++) {
rc = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
dma_addr_size[i], rc);
continue;
}
dev_info(dev, "using dma mask %d", dma_addr_size[i]);
wil->dma_addr_size = dma_addr_size[i];
break;
}
if (wil->dma_addr_size == 0)
goto err_iounmap;
wil6210_clear_irq(wil);
/* FW should raise IRQ when ready */
......
......@@ -211,7 +211,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
goto reject_suspend;
}
if (!wil_is_rx_idle(wil)) {
if (!wil->txrx_ops.is_rx_idle(wil)) {
wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
wil->suspend_stats.rejected_by_host++;
goto reject_suspend;
......@@ -235,9 +235,9 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil_is_rx_idle(wil)) {
while (!wil->txrx_ops.is_rx_idle(wil)) {
if (time_after(jiffies, data_comp_to)) {
if (wil_is_rx_idle(wil))
if (wil->txrx_ops.is_rx_idle(wil))
break;
wil_err(wil,
"TO waiting for idle RX, suspend failed\n");
......
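For context (a simplified sketch, not the driver's actual definitions): routing is_rx_idle and friends through a per-device txrx_ops table is what lets this suspend logic serve both the legacy and the new enhanced-DMA datapaths. The field names below come from the diff; the edma handler symbol is hypothetical:

#include "wil6210.h"

/* Simplified sketch of the ops-table dispatch; not the driver's actual code. */
static void wil_init_txrx_ops_sketch(struct wil6210_priv *wil)
{
        if (wil->use_enhanced_dma_hw)   /* Talyn-MB and later */
                wil->txrx_ops.is_rx_idle = wil_is_rx_idle_edma; /* hypothetical name */
        else
                wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
}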
......@@ -95,17 +95,17 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
struct wil6210_vif *vif;
struct net_device *ndev;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
int tid = wil_rxdesc_tid(d);
int cid = wil_rxdesc_cid(d);
int mid = wil_rxdesc_mid(d);
u16 seq = wil_rxdesc_seq(d);
int mcast = wil_rxdesc_mcast(d);
struct wil_sta_info *sta = &wil->sta[cid];
int tid, cid, mid, mcast;
u16 seq;
struct wil_sta_info *sta;
struct wil_tid_ampdu_rx *r;
u16 hseq;
int index;
wil->txrx_ops.get_reorder_params(wil, skb, &tid, &cid, &mid, &seq,
&mcast);
sta = &wil->sta[cid];
wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
mid, cid, tid, seq, mcast);
......@@ -315,7 +315,10 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
* bits 6..15: buffer size
*/
u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
bool agg_amsdu = !!(param_set & BIT(0));
bool agg_amsdu = wil->use_enhanced_dma_hw &&
wil->use_rx_hw_reordering &&
test_bit(WMI_FW_CAPABILITY_AMSDU, wil->fw_capabilities) &&
wil->amsdu_en && (param_set & BIT(0));
int ba_policy = param_set & BIT(1);
u16 status = WLAN_STATUS_SUCCESS;
u16 ssn = seq_ctrl >> 4;
......@@ -360,8 +363,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
}
}
rc = wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
rc = wil->txrx_ops.wmi_addba_rx_resp(wil, mid, cid, tid, dialog_token,
status, agg_amsdu, agg_wsize,
agg_timeout);
if (rc || (status != WLAN_STATUS_SUCCESS)) {
wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
status);
......@@ -384,7 +388,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
u8 agg_wsize = wil_agg_size(wil, wsize);
u16 agg_timeout = 0;
struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
int rc = 0;
if (txdata->addba_in_progress) {
......
......@@ -187,6 +187,40 @@ TRACE_EVENT(wil6210_rx,
__entry->seq, __entry->type, __entry->subtype)
);
TRACE_EVENT(wil6210_rx_status,
TP_PROTO(struct wil6210_priv *wil, u8 use_compressed, u16 buff_id,
void *msg),
TP_ARGS(wil, use_compressed, buff_id, msg),
TP_STRUCT__entry(__field(u8, use_compressed)
__field(u16, buff_id)
__field(unsigned int, len)
__field(u8, mid)
__field(u8, cid)
__field(u8, tid)
__field(u8, type)
__field(u8, subtype)
__field(u16, seq)
__field(u8, mcs)
),
TP_fast_assign(__entry->use_compressed = use_compressed;
__entry->buff_id = buff_id;
__entry->len = wil_rx_status_get_length(msg);
__entry->mid = wil_rx_status_get_mid(msg);
__entry->cid = wil_rx_status_get_cid(msg);
__entry->tid = wil_rx_status_get_tid(msg);
__entry->type = wil_rx_status_get_frame_type(wil,
msg);
__entry->subtype = wil_rx_status_get_fc1(wil, msg);
__entry->seq = wil_rx_status_get_seq(wil, msg);
__entry->mcs = wil_rx_status_get_mcs(msg);
),
TP_printk(
"compressed %d buff_id %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x type 0x%1x subtype 0x%1x",
__entry->use_compressed, __entry->buff_id, __entry->len,
__entry->mid, __entry->cid, __entry->tid, __entry->mcs,
__entry->seq, __entry->type, __entry->subtype)
);
TRACE_EVENT(wil6210_tx,
TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
TP_ARGS(vring, index, len, frags),
......@@ -226,6 +260,31 @@ TRACE_EVENT(wil6210_tx_done,
__entry->err)
);
TRACE_EVENT(wil6210_tx_status,
TP_PROTO(struct wil_ring_tx_status *msg, u16 index,
unsigned int len),
TP_ARGS(msg, index, len),
TP_STRUCT__entry(__field(u16, index)
__field(unsigned int, len)
__field(u8, num_descs)
__field(u8, ring_id)
__field(u8, status)
__field(u8, mcs)
),
TP_fast_assign(__entry->index = index;
__entry->len = len;
__entry->num_descs = msg->num_descriptors;
__entry->ring_id = msg->ring_id;
__entry->status = msg->status;
__entry->mcs = wil_tx_status_get_mcs(msg);
),
TP_printk(
"ring_id %d swtail 0x%x len %d num_descs %d status 0x%x mcs %d",
__entry->ring_id, __entry->index, __entry->len,
__entry->num_descs, __entry->status, __entry->mcs)
);
#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ*/
#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
......
......@@ -18,6 +18,9 @@
#ifndef WIL6210_TXRX_H
#define WIL6210_TXRX_H
#include "wil6210.h"
#include "txrx_edma.h"
#define BUF_SW_OWNED (1)
#define BUF_HW_OWNED (0)
......@@ -29,19 +32,13 @@
/* Tx/Rx path */
/* Common representation of physical address in Vring */
struct vring_dma_addr {
__le32 addr_low;
__le16 addr_high;
} __packed;
static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
static inline dma_addr_t wil_desc_addr(struct wil_ring_dma_addr *addr)
{
return le32_to_cpu(addr->addr_low) |
((u64)le16_to_cpu(addr->addr_high) << 32);
}
static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
static inline void wil_desc_addr_set(struct wil_ring_dma_addr *addr,
dma_addr_t pa)
{
addr->addr_low = cpu_to_le32(lower_32_bits(pa));
......@@ -294,7 +291,7 @@ struct vring_tx_mac {
*/
struct vring_tx_dma {
u32 d0;
struct vring_dma_addr addr;
struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11; /* 0..6: mac_length; 7:ip_version */
u8 error; /* 0..2: err; 3..7: reserved; */
......@@ -428,7 +425,7 @@ struct vring_rx_mac {
struct vring_rx_dma {
u32 d0;
struct vring_dma_addr addr;
struct wil_ring_dma_addr addr;
u8 ip_length;
u8 b11;
u8 error;
......@@ -441,14 +438,24 @@ struct vring_tx_desc {
struct vring_tx_dma dma;
} __packed;
union wil_tx_desc {
struct vring_tx_desc legacy;
struct wil_tx_enhanced_desc enhanced;
} __packed;
struct vring_rx_desc {
struct vring_rx_mac mac;
struct vring_rx_dma dma;
} __packed;
union vring_desc {
struct vring_tx_desc tx;
struct vring_rx_desc rx;
union wil_rx_desc {
struct vring_rx_desc legacy;
struct wil_rx_enhanced_desc enhanced;
} __packed;
union wil_ring_desc {
union wil_tx_desc tx;
union wil_rx_desc rx;
} __packed;
static inline int wil_rxdesc_tid(struct vring_rx_desc *d)
......@@ -528,6 +535,76 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
return (void *)skb->cb;
}
static inline int wil_ring_is_empty(struct wil_ring *ring)
{
return ring->swhead == ring->swtail;
}
static inline u32 wil_ring_next_tail(struct wil_ring *ring)
{
return (ring->swtail + 1) % ring->size;
}
static inline void wil_ring_advance_head(struct wil_ring *ring, int n)
{
ring->swhead = (ring->swhead + n) % ring->size;
}
static inline int wil_ring_is_full(struct wil_ring *ring)
{
return wil_ring_next_tail(ring) == ring->swhead;
}
static inline bool wil_need_txstat(struct sk_buff *skb)
{
struct ethhdr *eth = (void *)skb->data;
return is_unicast_ether_addr(eth->h_dest) && skb->sk &&
(skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS);
}
static inline void wil_consume_skb(struct sk_buff *skb, bool acked)
{
if (unlikely(wil_need_txstat(skb)))
skb_complete_wifi_ack(skb, acked);
else
acked ? dev_consume_skb_any(skb) : dev_kfree_skb_any(skb);
}
/* Used space in Tx ring */
static inline int wil_ring_used_tx(struct wil_ring *ring)
{
u32 swhead = ring->swhead;
u32 swtail = ring->swtail;
return (ring->size + swhead - swtail) % ring->size;
}
/* Available space in Tx ring */
static inline int wil_ring_avail_tx(struct wil_ring *ring)
{
return ring->size - wil_ring_used_tx(ring) - 1;
}
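A standalone aside on the "- 1" above: one descriptor is deliberately left unused so that swhead == swtail can unambiguously mean "empty" rather than "full". The accounting, extracted into plain C:

/* Illustration only, mirrors the inline helpers above. */
struct demo_ring { unsigned int size, swhead, swtail; };

static unsigned int demo_ring_used(const struct demo_ring *r)
{
        return (r->size + r->swhead - r->swtail) % r->size;
}

static unsigned int demo_ring_avail(const struct demo_ring *r)
{
        /* e.g. size 8, swhead 2, swtail 6: used = 4, avail = 8 - 4 - 1 = 3 */
        return r->size - demo_ring_used(r) - 1;
}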
static inline int wil_get_min_tx_ring_id(struct wil6210_priv *wil)
{
/* In Enhanced DMA ring 0 is reserved for RX */
return wil->use_enhanced_dma_hw ? 1 : 0;
}
/* similar to ieee80211_ version, but FC contain only 1-st byte */
static inline int wil_is_back_req(u8 fc)
{
return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}
/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
return val >= min && val < max;
}
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif,
......@@ -536,5 +613,7 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
int size, u16 ssn);
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
struct wil_tid_ampdu_rx *r);
void wil_tx_data_init(struct wil_ring_tx_data *txdata);
void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil);
#endif /* WIL6210_TXRX_H */
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
......@@ -36,7 +37,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
if (!map->fw)
if (!map->crash_dump)
continue;
if (map->host < host_min)
......@@ -85,7 +86,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
if (!map->fw)
if (!map->crash_dump)
continue;
data = (void * __force)wil->csr + HOSTADDR(map->host);
......