Commit af5399b2 authored by Kalle Valo

Merge tag 'mt76-for-kvalo-2019-02-18' of https://github.com/nbd168/wireless

mt76 patches for 5.1

* beacon support for USB devices (mesh+ad-hoc only)
* mt76x0 tx power fixes
* fixes for encryption, calibration and ED/CCA
* more code unification
* USB fixes
* fix to use the correct hweight8 function
parents f77ecde5 9f688473
......@@ -4,7 +4,8 @@ obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
tx.o agg-rx.o mcu.o
mt76-usb-y := usb.o usb_trace.o usb_mcu.o
......
......@@ -242,6 +242,30 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
iowrite32(q->head, &q->regs->cpu_idx);
}
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
struct mt76_queue *q = &dev->q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev->dev, addr))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
spin_lock_bh(&q->lock);
mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
mt76_dma_kick_queue(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
......@@ -522,17 +546,16 @@ mt76_dma_init(struct mt76_dev *dev)
static const struct mt76_queue_ops mt76_dma_ops = {
.init = mt76_dma_init,
.alloc = mt76_dma_alloc_queue,
.add_buf = mt76_dma_add_buf,
.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
.tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
};
int mt76_dma_attach(struct mt76_dev *dev)
void mt76_dma_attach(struct mt76_dev *dev)
{
dev->queue_ops = &mt76_dma_ops;
return 0;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
......
......@@ -54,7 +54,7 @@ enum mt76_mcu_evt_type {
EVT_EVENT_DFS_DETECT_RSP,
};
int mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
#endif
......@@ -124,7 +124,7 @@ static void mt76_init_stream_cap(struct mt76_dev *dev,
bool vht)
{
struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
int i, nstream = __sw_hweight8(dev->antenna_mask);
int i, nstream = hweight8(dev->antenna_mask);
struct ieee80211_sta_vht_cap *vht_cap;
u16 mcs_map = 0;
......@@ -269,7 +269,9 @@ mt76_check_sband(struct mt76_dev *dev, int band)
}
struct mt76_dev *
mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops)
{
struct ieee80211_hw *hw;
struct mt76_dev *dev;
......@@ -280,6 +282,9 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
dev = hw->priv;
dev->hw = hw;
dev->dev = pdev;
dev->drv = drv_ops;
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
......@@ -721,7 +726,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int *dbm)
{
struct mt76_dev *dev = hw->priv;
int n_chains = __sw_hweight8(dev->antenna_mask);
int n_chains = hweight8(dev->antenna_mask);
*dbm = dev->txpower_cur / 2;
......
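The two hweight8() hunks above drop the explicit __sw_hweight8() call so the arch-optimized popcount gets used where available; the result is simply the number of set antenna bits. A standalone userspace sketch of that mapping, using the compiler builtin in place of the kernel helper:

#include <stdio.h>

/* userspace stand-in for hweight8(): number of set bits in an 8-bit mask */
static int popcount8(unsigned char mask)
{
        return __builtin_popcount(mask);
}

int main(void)
{
        unsigned char antenna_mask = 0x3;       /* two antennas enabled */

        /* same quantity as nstream / n_chains in the hunks above */
        printf("antenna_mask=0x%x -> %d chains\n",
               antenna_mask, popcount8(antenna_mask));
        return 0;
}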
/*
* Copyright (C) 2019 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mt76.h"
struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
int data_len, int tail_len)
{
struct sk_buff *skb;
skb = alloc_skb(head_len + data_len + tail_len,
GFP_KERNEL);
if (!skb)
return NULL;
skb_reserve(skb, head_len);
if (data && data_len)
skb_put_data(skb, data, data_len);
return skb;
}
EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc);
/* mmio */
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires)
{
unsigned long timeout;
if (!time_is_after_jiffies(expires))
return NULL;
timeout = expires - jiffies;
wait_event_timeout(dev->mmio.mcu.wait,
!skb_queue_empty(&dev->mmio.mcu.res_q),
timeout);
return skb_dequeue(&dev->mmio.mcu.res_q);
}
EXPORT_SYMBOL_GPL(mt76_mcu_get_response);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb)
{
skb_queue_tail(&dev->mmio.mcu.res_q, skb);
wake_up(&dev->mmio.mcu.wait);
}
EXPORT_SYMBOL_GPL(mt76_mcu_rx_event);
......@@ -87,6 +87,7 @@ struct mt76u_buf {
struct mt76_dev *dev;
struct urb *urb;
size_t len;
void *buf;
bool done;
};
......@@ -157,6 +158,9 @@ struct mt76_queue_ops {
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info);
void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
int *len, u32 *info, bool *more);
......@@ -376,6 +380,7 @@ struct mt76_usb {
u16 out_max_packet;
u8 in_ep[__MT_EP_IN_MAX];
u16 in_max_packet;
bool sg_en;
struct mt76u_mcu {
struct mutex mutex;
......@@ -562,7 +567,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
......@@ -582,8 +587,9 @@ mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
return &msband->chan[idx];
}
struct mt76_dev *mt76_alloc_device(unsigned int size,
const struct ieee80211_ops *ops);
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
......@@ -722,14 +728,17 @@ static inline u8 q2ep(u8 qid)
return qid + 1;
}
static inline bool mt76u_check_sg(struct mt76_dev *dev)
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int timeout)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
unsigned int pipe;
int sent;
return (udev->bus->sg_tablesize > 0 &&
(udev->bus->no_sg_constraint ||
udev->speed == USB_SPEED_WIRELESS));
pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
return usb_bulk_msg(udev, pipe, data, len, &sent, timeout);
}
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
......@@ -740,7 +749,7 @@ void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
int nsgs, int len, int sglen, gfp_t gfp);
int len, int data_len, gfp_t gfp);
void mt76u_buf_free(struct mt76u_buf *buf);
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
struct mt76u_buf *buf, gfp_t gfp,
......@@ -751,6 +760,13 @@ void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
void mt76u_mcu_complete_urb(struct urb *urb);
int mt76u_mcu_init_rx(struct mt76_dev *dev);
void mt76u_mcu_deinit(struct mt76_dev *dev);
......
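The new tx_queue_skb_raw hook added to struct mt76_queue_ops above follows the driver's usual dispatch pattern: the bus backend installs an ops table and a thin variadic macro forwards the call. A minimal, self-contained sketch of that style (demo names, not the driver's):

#include <stdio.h>

struct demo_dev;

struct demo_queue_ops {
        int (*tx_queue_skb_raw)(struct demo_dev *dev, int qid,
                                unsigned int tx_info);
};

struct demo_dev {
        const struct demo_queue_ops *queue_ops;
};

/* thin forwarder, like the mt76_tx_queue_skb_raw() macro above */
#define demo_tx_queue_skb_raw(dev, ...) \
        ((dev)->queue_ops->tx_queue_skb_raw((dev), __VA_ARGS__))

static int dma_tx_queue_skb_raw(struct demo_dev *dev, int qid,
                                unsigned int tx_info)
{
        (void)dev;
        printf("dma backend: qid=%d tx_info=0x%x\n", qid, tx_info);
        return 0;
}

static const struct demo_queue_ops dma_ops = {
        .tx_queue_skb_raw = dma_tx_queue_skb_raw,
};

int main(void)
{
        struct demo_dev dev = { .queue_ops = &dma_ops };

        return demo_tx_queue_skb_raw(&dev, 1, 0xabc);
}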
......@@ -152,11 +152,11 @@ static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
return mt76x02_rate_power_val(val);
}
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
struct ieee80211_channel *chan,
struct mt76_rate_power *t)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
struct mt76_rate_power *t = &dev->mt76.rate_power;
u16 val, addr;
s8 delta;
......@@ -189,7 +189,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
val = mt76x02_eeprom_get(dev, addr);
t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
t->ht[6] = t->ht[7] = t->vht[6] = t->vht[7] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
......@@ -205,14 +205,15 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
/* vht mcs 8, 9 5GHz */
val = mt76x02_eeprom_get(dev, 0x132);
t->vht[7] = s6_to_s8(val);
t->vht[8] = s6_to_s8(val >> 8);
t->vht[8] = s6_to_s8(val);
t->vht[9] = s6_to_s8(val >> 8);
delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev);
mt76x02_add_rate_power_offset(t, delta);
}
void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp)
void mt76x0_get_power_info(struct mt76x02_dev *dev,
struct ieee80211_channel *chan, s8 *tp)
{
struct mt76x0_chan_map {
u8 chan;
......@@ -226,7 +227,6 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp)
{ 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
{ 167, 34 }, { 171, 36 }, { 175, 38 },
};
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u8 offset, addr;
int i, idx = 0;
u16 data;
......
......@@ -25,8 +25,11 @@ struct mt76x02_dev;
int mt76x0_eeprom_init(struct mt76x02_dev *dev);
void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp);
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
struct ieee80211_channel *chan,
struct mt76_rate_power *t);
void mt76x0_get_power_info(struct mt76x02_dev *dev,
struct ieee80211_channel *chan, s8 *tp);
static inline s8 s6_to_s8(u32 val)
{
......
......@@ -18,6 +18,7 @@
#include "eeprom.h"
#include "mcu.h"
#include "initvals.h"
#include "../mt76x02_phy.h"
static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
{
......@@ -262,27 +263,24 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x0_init_hardware);
struct mt76x02_dev *
mt76x0_alloc_device(struct device *pdev,
const struct mt76_driver_ops *drv_ops,
const struct ieee80211_ops *ops)
static void
mt76x0_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband)
{
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), ops);
if (!mdev)
return NULL;
struct ieee80211_channel *chan;
struct mt76_rate_power t;
s8 tp;
int i;
mdev->dev = pdev;
mdev->drv = drv_ops;
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
dev = container_of(mdev, struct mt76x02_dev, mt76);
mutex_init(&dev->phy_mutex);
mt76x0_get_tx_power_per_rate(dev, chan, &t);
mt76x0_get_power_info(dev, chan, &tp);
return dev;
chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2;
}
}
EXPORT_SYMBOL_GPL(mt76x0_alloc_device);
int mt76x0_register_device(struct mt76x02_dev *dev)
{
......@@ -296,9 +294,14 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
if (ret)
return ret;
if (dev->mt76.cap.has_5ghz) {
/* overwrite unsupported features */
if (dev->mt76.cap.has_5ghz)
mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
mt76x0_init_txpower(dev, &dev->mt76.sband_5g.sband);
}
if (dev->mt76.cap.has_2ghz)
mt76x0_init_txpower(dev, &dev->mt76.sband_2g.sband);
mt76x02_init_debugfs(dev);
......
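In mt76x0_init_txpower() above, chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2: the per-rate values and the per-channel target power are kept in 0.5 dB steps, so their sum is halved to land in dBm, matching the txpower_cur / 2 in mt76_get_txpower() earlier. A tiny worked example under that assumption:

#include <stdio.h>

int main(void)
{
        /* both values in 0.5 dB units */
        int max_rate_power = 8;         /* best per-rate entry: +4 dB */
        int target_power = 26;          /* per-channel target: 13 dBm */

        /* same arithmetic as chan->max_power in mt76x0_init_txpower() */
        int max_power_dbm = (max_rate_power + target_power) / 2;

        printf("max_power = %d dBm\n", max_power_dbm);  /* 17 dBm */
        return 0;
}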
......@@ -50,10 +50,6 @@ static inline bool is_mt7630(struct mt76x02_dev *dev)
}
/* Init */
struct mt76x02_dev *
mt76x0_alloc_device(struct device *pdev,
const struct mt76_driver_ops *drv_ops,
const struct ieee80211_ops *ops);
int mt76x0_init_hardware(struct mt76x02_dev *dev);
int mt76x0_register_device(struct mt76x02_dev *dev);
void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
......
......@@ -30,7 +30,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
mt76x02_mac_start(dev);
mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
......@@ -174,6 +174,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
int ret;
ret = pcim_enable_device(pdev);
......@@ -190,16 +191,20 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops);
if (!dev)
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
dev = container_of(mdev, struct mt76x02_dev, mt76);
mutex_init(&dev->phy_mutex);
mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
......
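With mt76x0_alloc_device() removed, the PCI probe above calls the common mt76_alloc_device() and recovers its private state via container_of() on the embedded struct mt76_dev. A self-contained userspace sketch of that embedding pattern (demo types, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* same trick as the kernel's container_of(): member pointer -> parent pointer */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_mt76_dev {                  /* stands in for struct mt76_dev */
        unsigned int rev;
};

struct demo_mt76x02_dev {               /* stands in for struct mt76x02_dev */
        struct demo_mt76_dev mt76;      /* embedded common state */
        int phy_state;
};

int main(void)
{
        struct demo_mt76x02_dev dev = {
                .mt76 = { .rev = 0x7610 },
                .phy_state = 1,
        };
        struct demo_mt76_dev *mdev = &dev.mt76;

        /* common code hands back mdev; the driver recovers its own struct */
        struct demo_mt76x02_dev *back =
                container_of(mdev, struct demo_mt76x02_dev, mt76);

        printf("rev=%x phy_state=%d same=%d\n",
               back->mt76.rev, back->phy_state, back == &dev);
        return 0;
}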
......@@ -847,14 +847,15 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
struct mt76_rate_power *t = &dev->mt76.rate_power;
s8 info;
mt76x0_get_tx_power_per_rate(dev);
mt76x0_get_power_info(dev, &info);
mt76x0_get_tx_power_per_rate(dev, dev->mt76.chandef.chan, t);
mt76x0_get_power_info(dev, dev->mt76.chandef.chan, &info);
mt76x02_add_rate_power_offset(t, info);
mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info);
dev->target_power = info;
mt76x02_phy_set_txpower(dev, info, info);
}
......@@ -1006,14 +1007,16 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
/* enable vco */
mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7));
if (scan)
if (scan) {
mt76x02_edcca_init(dev, false);
return 0;
}
mt76x02_init_agc_gain(dev);
mt76x0_phy_calibrate(dev, false);
mt76x0_phy_set_txpower(dev);
mt76x02_edcca_init(dev);
mt76x02_edcca_init(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
......
......@@ -118,7 +118,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
......@@ -206,7 +206,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
goto out_err;
/* check hw sg support in order to enable AMSDU */
if (mt76u_check_sg(&dev->mt76))
if (dev->mt76.usb.sg_en)
hw->max_tx_fragments = MT_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;
......@@ -233,14 +233,18 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
};
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
u32 asic_rev, mac_rev;
int ret;
dev = mt76x0_alloc_device(&usb_intf->dev, &drv_ops,
&mt76x0u_ops);
if (!dev)
mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt76x02_dev, mt76);
mutex_init(&dev->phy_mutex);
/* Quirk for Archer T1U */
if (id->driver_info)
dev->no_2ghz = true;
......@@ -250,27 +254,27 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, dev);
mt76x02u_init_mcu(&dev->mt76);
ret = mt76u_init(&dev->mt76, usb_intf);
mt76x02u_init_mcu(mdev);
ret = mt76u_init(mdev, usb_intf);
if (ret)
goto err;
/* Disable the HW, otherwise the MCU fails to initialize on hot reboot */
mt76x0_chip_onoff(dev, false, false);
if (!mt76x02_wait_for_mac(&dev->mt76)) {
if (!mt76x02_wait_for_mac(mdev)) {
ret = -ETIMEDOUT;
goto err;
}
asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
mac_rev = mt76_rr(dev, MT_MAC_CSR0);
dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n",
dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
asic_rev, mac_rev);
/* Note: vendor driver skips this check for MT76X0U */
if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n");
dev_warn(mdev->dev, "Warning: eFUSE not present\n");
ret = mt76x0u_register_device(dev);
if (ret < 0)
......@@ -282,7 +286,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
usb_set_intfdata(usb_intf, NULL);
usb_put_dev(interface_to_usbdev(usb_intf));
ieee80211_free_hw(dev->mt76.hw);
ieee80211_free_hw(mdev->hw);
return ret;
}
......
......@@ -140,12 +140,6 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 6000);
/*
mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
MT_PBF_CFG_TX1Q_EN |
MT_PBF_CFG_TX2Q_EN |
MT_PBF_CFG_TX3Q_EN));
*/
mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
......
......@@ -27,6 +27,7 @@
#include "mt76x02_dma.h"
#define MT_CALIBRATE_INTERVAL HZ
#define MT_MAC_WORK_INTERVAL (HZ / 10)
#define MT_WATCHDOG_TIME (HZ / 10)
#define MT_TX_HANG_TH 10
......@@ -73,6 +74,8 @@ struct mt76x02_dev {
struct mutex phy_mutex;
u16 vif_mask;
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
......@@ -114,6 +117,7 @@ struct mt76x02_dev {
bool ed_monitor;
u8 ed_trigger;
u8 ed_silent;
ktime_t ed_time;
};
extern struct ieee80211_rate mt76x02_rates[12];
......@@ -128,8 +132,7 @@ void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev);
void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
unsigned int idx);
int mt76x02_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
......
......@@ -886,7 +886,7 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
tasklet_disable(&dfs_pd->dfs_tasklet);
dev->ed_monitor = region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev);
mt76x02_edcca_init(dev, true);
dfs_pd->region = region;
mt76x02_dfs_init_params(dev);
......
......@@ -291,6 +291,13 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
memset(txwi, 0, sizeof(*txwi));
if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
ieee80211_has_protected(hdr->frame_control)) {
wcid = NULL;
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
}
if (wcid)
txwi->wcid = wcid->idx;
else
......@@ -307,7 +314,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
ccmp_pn[6] = pn >> 32;
ccmp_pn[7] = pn >> 40;
txwi->iv = *((__le32 *)&ccmp_pn[0]);
txwi->eiv = *((__le32 *)&ccmp_pn[1]);
txwi->eiv = *((__le32 *)&ccmp_pn[4]);
}
spin_lock_bh(&dev->mt76.lock);
......@@ -548,8 +555,11 @@ mt76x02_mac_process_rate(struct mt76x02_dev *dev,
return 0;
}
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
{
static const u8 null_addr[ETH_ALEN] = {};
int i;
ether_addr_copy(dev->mt76.macaddr, addr);
if (!is_valid_ether_addr(dev->mt76.macaddr)) {
......@@ -563,6 +573,16 @@ void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
mt76_wr(dev, MT_MAC_ADDR_DW1,
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
mt76_wr(dev, MT_MAC_BSSID_DW0,
get_unaligned_le32(dev->mt76.macaddr));
mt76_wr(dev, MT_MAC_BSSID_DW1,
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */
MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
for (i = 0; i < 16; i++)
mt76x02_mac_set_bssid(dev, i, null_addr);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
......@@ -588,7 +608,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
u16 rate = le16_to_cpu(rxwi->rate);
u16 tid_sn = le16_to_cpu(rxwi->tid_sn);
bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST);
int i, pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf;
s8 signal;
u8 pn_len;
u8 wcid;
......@@ -648,12 +668,13 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
status->chains = BIT(0);
signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
for (i = 0; i < nstreams; i++) {
status->chains |= BIT(i);
status->chain_signal[i] = mt76x02_mac_get_rssi(dev,
rxwi->rssi[i],
i);
signal = max_t(s8, signal, status->chain_signal[i]);
status->chain_signal[0] = signal;
if (nstreams > 1) {
status->chains |= BIT(1);
status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
rxwi->rssi[1],
1);
signal = max_t(s8, signal, status->chain_signal[1]);
}
status->signal = signal;
status->freq = dev->mt76.chandef.chan->center_freq;
......@@ -871,12 +892,12 @@ mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
dev->ed_tx_blocked = !enable;
}
void mt76x02_edcca_init(struct mt76x02_dev *dev)
void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
{
dev->ed_trigger = 0;
dev->ed_silent = 0;
if (dev->ed_monitor) {
if (dev->ed_monitor && enable) {
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;
......@@ -899,17 +920,27 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev)
}
}
mt76x02_edcca_tx_enable(dev, true);
/* clear previous CCA timer value */
mt76_rr(dev, MT_ED_CCA_TIMER);
dev->ed_time = ktime_get_boottime();
}
EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
#define MT_EDCCA_TH 90
#define MT_EDCCA_TH 92
#define MT_EDCCA_BLOCK_TH 2
static void mt76x02_edcca_check(struct mt76x02_dev *dev)
{
u32 val, busy;
ktime_t cur_time;
u32 active, val, busy;
cur_time = ktime_get_boottime();
val = mt76_rr(dev, MT_ED_CCA_TIMER);
busy = (val * 100) / jiffies_to_usecs(MT_CALIBRATE_INTERVAL);
active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
dev->ed_time = cur_time;
busy = (val * 100) / active;
busy = min_t(u32, busy, 100);
if (busy > MT_EDCCA_TH) {
......@@ -955,7 +986,7 @@ void mt76x02_mac_work(struct work_struct *work)
mt76_tx_status_check(&dev->mt76, NULL, false);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
MT_MAC_WORK_INTERVAL);
}
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
......@@ -1047,8 +1078,9 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
return 0;
}
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
u8 vif_idx, bool val)
static void
__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
bool val, struct sk_buff *skb)
{
u8 old_mask = dev->beacon_mask;
bool en;
......@@ -1056,6 +1088,8 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
if (val) {
dev->beacon_mask |= BIT(vif_idx);
if (skb)
mt76x02_mac_set_beacon(dev, vif_idx, skb);
} else {
dev->beacon_mask &= ~BIT(vif_idx);
mt76x02_mac_set_beacon(dev, vif_idx, NULL);
......@@ -1066,14 +1100,34 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
en = dev->beacon_mask;
mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
reg = MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_TIMER_EN;
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
if (mt76_is_usb(dev))
return;
mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
if (en)
mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
else
mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
struct ieee80211_vif *vif, bool val)
{
u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
struct sk_buff *skb = NULL;
if (mt76_is_mmio(dev))
tasklet_disable(&dev->pre_tbtt_tasklet);
else if (val)
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
__mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
if (mt76_is_mmio(dev))
tasklet_enable(&dev->pre_tbtt_tasklet);
}
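mt76x02_edcca_check() above now scales the MT_ED_CCA_TIMER reading by the real elapsed time (the ktime delta) instead of the nominal calibration interval, since the work now runs at MT_MAC_WORK_INTERVAL. A worked example of the busy percentage, assuming the register counts microseconds of busy air time since the last read:

#include <stdio.h>

int main(void)
{
        /* microseconds the medium was seen busy since the last check ... */
        unsigned int busy_us = 92000;
        /* ... and the microseconds that actually elapsed (the ktime delta) */
        unsigned int active_us = 100000;

        unsigned int busy = busy_us * 100u / active_us;
        if (busy > 100)
                busy = 100;

        /* 92% sits exactly at the new MT_EDCCA_TH, so not yet over it */
        printf("busy = %u%%\n", busy);
        return 0;
}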
......@@ -191,7 +191,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
int ht_mode);
void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val);
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr);
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
......@@ -204,8 +204,8 @@ void mt76x02_mac_work(struct work_struct *work);
void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
struct sk_buff *skb);
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
bool val);
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
struct ieee80211_vif *vif, bool val);
void mt76x02_edcca_init(struct mt76x02_dev *dev);
void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable);
#endif
......@@ -21,70 +21,13 @@
#include "mt76x02_mcu.h"
static struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
memcpy(skb_put(skb, len), data, len);
return skb;
}
static struct sk_buff *
mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
{
unsigned long timeout;
if (!time_is_after_jiffies(expires))
return NULL;
timeout = expires - jiffies;
wait_event_timeout(dev->mt76.mmio.mcu.wait,
!skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
timeout);
return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
}
static int
mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq)
{
struct mt76_queue *q = &dev->mt76.q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
u32 tx_info;
tx_info = MT_MCU_MSG_TYPE_CMD |
FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev->mt76.dev, addr))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
spin_lock_bh(&q->lock);
mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
int len, bool wait_resp)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
unsigned long expires = jiffies + HZ;
struct sk_buff *skb;
u32 tx_info;
int ret;
u8 seq;
......@@ -98,7 +41,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
if (!seq)
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
tx_info = MT_MCU_MSG_TYPE_CMD |
FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, tx_info);
if (ret)
goto out;
......@@ -106,7 +55,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
u32 *rxfce;
bool check_seq = false;
skb = mt76x02_mcu_get_response(dev, expires);
skb = mt76_mcu_get_response(&dev->mt76, expires);
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
......
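The unified MCU send path above packs command, sequence number, port and length into one 32-bit descriptor word with FIELD_PREP() before handing the skb to mt76_tx_queue_skb_raw(). A standalone sketch of that packing style; the masks below are invented for illustration and are not the real MT_MCU_MSG_* layout:

#include <stdio.h>

/* minimal FIELD_PREP(): shift the value into the mask's bit position */
#define FIELD_PREP(mask, val) \
        (((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

/* hypothetical layout, only to show the packing style */
#define DEMO_MSG_LEN            0x0000ffffu
#define DEMO_MSG_CMD_SEQ        0x000f0000u
#define DEMO_MSG_CMD_TYPE       0x0ff00000u
#define DEMO_MSG_TYPE_CMD       0x40000000u

int main(void)
{
        unsigned int tx_info = DEMO_MSG_TYPE_CMD |
                               FIELD_PREP(DEMO_MSG_CMD_TYPE, 0x31) |
                               FIELD_PREP(DEMO_MSG_CMD_SEQ, 5) |
                               FIELD_PREP(DEMO_MSG_LEN, 128);

        printf("tx_info = 0x%08x\n", tx_info);
        return 0;
}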
......@@ -96,6 +96,12 @@ struct mt76x02_patch_header {
u8 pad[2];
};
static inline struct sk_buff *
mt76x02_mcu_msg_alloc(const void *data, int len)
{
return mt76_mcu_msg_alloc(data, 0, len, 0);
}
int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param);
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
......
......@@ -22,7 +22,6 @@
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76x02_dev *dev = hw->priv;
struct ieee80211_vif *vif = info->control.vif;
......@@ -33,13 +32,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
msta = (struct mt76x02_sta *)control->sta->drv_priv;
wcid = &msta->wcid;
/* sw encrypted frames */
if (!info->control.hw_key && wcid->hw_key_idx != 0xff &&
ieee80211_has_protected(hdr->frame_control))
control->sta = NULL;
}
if (vif && !control->sta) {
} else if (vif) {
struct mt76x02_vif *mvif;
mvif = (struct mt76x02_vif *)vif->drv_priv;
......@@ -58,8 +51,7 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
if (q == MT_RXQ_MCU) {
/* this is used just by mmio code */
skb_queue_tail(&mdev->mmio.mcu.res_q, skb);
wake_up(&mdev->mmio.mcu.wait);
mt76_mcu_rx_event(&dev->mt76, skb);
return;
}
......
......@@ -49,7 +49,12 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
/* Add zero pad of 4 - 7 bytes */
pad = round_up(skb->len, 4) + 4 - skb->len;
/* First packet of an A-MSDU burst keeps track of the whole burst
* length, need to update the length of it and the last packet.
*/
skb_walk_frags(skb, iter) {
last = iter;
if (!iter->next) {
......@@ -59,11 +64,10 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
}
}
if (unlikely(pad)) {
if (skb_pad(last, pad))
return -ENOMEM;
__skb_put(last, pad);
}
return 0;
}
......
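mt76x02u_skb_dma_info() pads each USB TX frame out to a 4-byte boundary plus an extra 4 zero bytes, hence the 4-7 byte pad: pad = round_up(len, 4) + 4 - len. A quick check of that formula for a few lengths:

#include <stdio.h>

static unsigned int round_up4(unsigned int x)
{
        return (x + 3u) & ~3u;
}

int main(void)
{
        unsigned int lens[] = { 60, 61, 62, 63, 64 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                unsigned int len = lens[i];
                unsigned int pad = round_up4(len) + 4 - len;

                printf("len=%u -> pad=%u\n", len, pad);  /* always 4..7 */
        }
        return 0;
}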
......@@ -28,21 +28,6 @@
#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
static struct sk_buff *
mt76x02u_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL);
if (!skb)
return NULL;
skb_reserve(skb, MT_CMD_HDR_LEN);
skb_put_data(skb, data, len);
return skb;
}
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
......@@ -78,9 +63,9 @@ static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
struct mt76_usb *usb = &dev->usb;
struct mt76u_buf *buf = &usb->mcu.res;
struct urb *urb = buf->urb;
u8 *data = buf->buf;
int i, ret;
u32 rxfce;
u8 *data;
for (i = 0; i < 5; i++) {
if (!wait_for_completion_timeout(&usb->mcu.cmpl,
......@@ -90,7 +75,6 @@ static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
if (urb->status)
return -EIO;
data = sg_virt(&urb->sg[0]);
if (usb->mcu.rp)
mt76x02u_multiple_mcu_reads(dev, data + 4,
urb->actual_length - 8);
......@@ -121,18 +105,14 @@ static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
unsigned int pipe;
int ret, sent;
int ret;
u8 seq = 0;
u32 info;
if (test_bit(MT76_REMOVED, &dev->state))
return 0;
pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
if (wait_resp) {
seq = ++usb->mcu.msg_seq & 0xf;
if (!seq)
......@@ -146,7 +126,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
if (ret)
return ret;
ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
ret = mt76u_bulk_msg(dev, skb->data, skb->len, 500);
if (ret)
return ret;
......@@ -166,7 +146,7 @@ mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
struct sk_buff *skb;
int err;
skb = mt76x02u_mcu_msg_alloc(data, len);
skb = mt76_mcu_msg_alloc(data, MT_CMD_HDR_LEN, len, 8);
if (!skb)
return -ENOMEM;
......@@ -268,14 +248,12 @@ void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
const void *fw_data, int len, u32 dst_addr)
{
u8 *data = sg_virt(&buf->urb->sg[0]);
DECLARE_COMPLETION_ONSTACK(cmpl);
__le32 info;
u32 val;
int err;
int err, data_len;
info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, len) |
......@@ -291,25 +269,12 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
MT_FCE_DMA_LEN, len << 16);
buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
MT_EP_OUT_INBAND_CMD,
buf, GFP_KERNEL,
mt76u_mcu_complete_urb, &cmpl);
if (err < 0)
return err;
if (!wait_for_completion_timeout(&cmpl,
msecs_to_jiffies(1000))) {
dev_err(dev->mt76.dev, "firmware upload timed out\n");
usb_kill_urb(buf->urb);
return -ETIMEDOUT;
}
data_len = MT_CMD_HDR_LEN + len + sizeof(info);
if (mt76u_urb_error(buf->urb)) {
dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
buf->urb->status);
return buf->urb->status;
err = mt76u_bulk_msg(&dev->mt76, data, data_len, 1000);
if (err) {
dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
return err;
}
val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
......@@ -322,17 +287,16 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset)
{
int err, len, pos = 0, max_len = max_payload - 8;
struct mt76u_buf buf;
int len, err = 0, pos = 0, max_len = max_payload - 8;
u8 *buf;
err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
GFP_KERNEL);
if (err < 0)
return err;
buf = kmalloc(max_payload, GFP_KERNEL);
if (!buf)
return -ENOMEM;
while (data_len > 0) {
len = min_t(int, data_len, max_len);
err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
len, offset + pos);
if (err < 0)
break;
......@@ -341,7 +305,7 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
pos += len;
usleep_range(5000, 10000);
}
mt76u_buf_free(&buf);
kfree(buf);
return err;
}
......
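With the URB machinery gone, __mt76x02u_mcu_fw_send_data() builds each chunk in a plain kmalloc() buffer and pushes it synchronously via mt76u_bulk_msg(); the caller slices the firmware image into max_payload-sized pieces, keeping 8 bytes of room for the header and trailer. A standalone sketch of that slicing loop:

#include <stdio.h>

int main(void)
{
        int data_len = 10000;                   /* firmware bytes left */
        int max_payload = 4096;
        int max_len = max_payload - 8;          /* header/trailer room */
        int pos = 0;

        while (data_len > 0) {
                int len = data_len < max_len ? data_len : max_len;

                printf("chunk at offset %d, %d bytes\n", pos, len);
                data_len -= len;
                pos += len;
        }
        return 0;
}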
......@@ -140,10 +140,16 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_ADHOC);
if (mt76_is_usb(dev)) {
hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
MT_DMA_HDR_LEN;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
} else {
INIT_DELAYED_WORK(&dev->wdt_work, mt76x02_wdt_work);
......@@ -152,18 +158,9 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
wiphy->reg_notifier = mt76x02_regd_notifier;
wiphy->iface_combinations = mt76x02_if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_ADHOC);
wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
dev->mt76.led_cdev.brightness_set =
......@@ -172,6 +169,8 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
}
}
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);
......@@ -268,7 +267,8 @@ void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
static void
mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
unsigned int idx)
{
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
......@@ -282,7 +282,6 @@ void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mt76_txq_init(&dev->mt76, vif->txq);
}
EXPORT_SYMBOL_GPL(mt76x02_vif_init);
int
mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
......@@ -309,6 +308,15 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_STATION)
idx += 8;
if (dev->vif_mask & BIT(idx))
return -EBUSY;
/* Allow changing the address in HW when creating the first interface. */
if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
mt76x02_mac_setaddr(dev, vif->addr);
dev->vif_mask |= BIT(idx);
mt76x02_vif_init(dev, vif, idx);
return 0;
}
......@@ -318,8 +326,10 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
mt76_txq_remove(&dev->mt76, vif->txq);
dev->vif_mask &= ~BIT(mvif->idx);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
......@@ -421,7 +431,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
} else {
if (idx == wcid->hw_key_idx) {
wcid->hw_key_idx = -1;
wcid->sw_iv = true;
wcid->sw_iv = false;
}
key = NULL;
......@@ -657,29 +667,26 @@ static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
{
static const u8 null_addr[ETH_ALEN] = {};
int i;
mt76_wr(dev, MT_MAC_BSSID_DW0,
get_unaligned_le32(dev->mt76.macaddr));
mt76_wr(dev, MT_MAC_BSSID_DW1,
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
if (mt76_is_mmio(dev)) {
/* Fire a pre-TBTT interrupt 8 ms before TBTT */
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
8 << 4);
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
MT_DFS_GP_INTERVAL);
mt76_wr(dev, MT_INT_TIMER_EN, 0);
}
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX));
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
for (i = 0; i < 8; i++) {
mt76x02_mac_set_bssid(dev, i, null_addr);
for (i = 0; i < 8; i++)
mt76x02_mac_set_beacon(dev, i, NULL);
}
mt76x02_set_beacon_offsets(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
......@@ -697,12 +704,8 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID)
mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x02_mac_set_beacon_enable(dev, mvif->idx,
info->enable_beacon);
tasklet_enable(&dev->pre_tbtt_tasklet);
}
if (changed & BSS_CHANGED_BEACON_ENABLED)
mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon);
if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
mt76x02_mac_set_tx_protection(dev, info->use_cts_prot,
......
......@@ -49,7 +49,6 @@ static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
extern const struct ieee80211_ops mt76x2_ops;
struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x02_dev *dev);
void mt76x2_phy_power_on(struct mt76x02_dev *dev);
......
......@@ -29,7 +29,6 @@
extern const struct ieee80211_ops mt76x2u_ops;
struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev);
int mt76x2u_register_device(struct mt76x02_dev *dev);
int mt76x2u_init_hardware(struct mt76x02_dev *dev);
void mt76x2u_cleanup(struct mt76x02_dev *dev);
......
......@@ -30,7 +30,19 @@ static const struct pci_device_id mt76pci_device_table[] = {
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
.rx_poll_complete = mt76x02_rx_poll_complete,
.sta_ps = mt76x02_sta_ps,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
int ret;
ret = pcim_enable_device(pdev);
......@@ -47,17 +59,19 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
dev = mt76x2_alloc_device(&pdev->dev);
if (!dev)
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x2_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
dev = container_of(mdev, struct mt76x02_dev, mt76);
mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);
mt76x2_reset_wlan(dev, false);
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
......
......@@ -119,9 +119,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
mt76x02_mac_setaddr(dev, macaddr);
mt76x02_init_beacon_config(dev);
if (!hard)
return 0;
......@@ -315,33 +313,6 @@ void mt76x2_cleanup(struct mt76x02_dev *dev)
mt76x02_mcu_cleanup(dev);
}
struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
.rx_poll_complete = mt76x02_rx_poll_complete,
.sta_ps = mt76x02_sta_ps,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops);
if (!mdev)
return NULL;
dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev->dev = pdev;
mdev->drv = &drv_ops;
return dev;
}
int mt76x2_register_device(struct mt76x02_dev *dev)
{
int ret;
......
......@@ -33,7 +33,7 @@ mt76x2_start(struct ieee80211_hw *hw)
goto out;
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
MT_WATCHDOG_TIME);
......
......@@ -240,8 +240,10 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
if (scan)
if (scan) {
mt76x02_edcca_init(dev, false);
return 0;
}
mt76x2_phy_channel_calibrate(dev, true);
mt76x02_init_agc_gain(dev);
......@@ -254,7 +256,7 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
0x38);
}
mt76x02_edcca_init(dev);
mt76x02_edcca_init(dev, true);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
......
......@@ -241,7 +241,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
t.offset1 = txp.chain[1].tssi_offset;
mt76x2_mcu_tssi_comp(dev, &t);
if (t.pa_mode || dev->cal.dpd_cal_done)
if (t.pa_mode || dev->cal.dpd_cal_done || dev->ed_tx_blocked)
return;
usleep_range(10000, 20000);
......
......@@ -36,24 +36,36 @@ static const struct usb_device_id mt76x2u_device_table[] = {
static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
};
struct usb_device *udev = interface_to_usbdev(intf);
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
int err;
dev = mt76x2u_alloc_device(&intf->dev);
if (!dev)
mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
dev = container_of(mdev, struct mt76x02_dev, mt76);
udev = usb_get_dev(udev);
usb_reset_device(udev);
mt76x02u_init_mcu(&dev->mt76);
err = mt76u_init(&dev->mt76, intf);
mt76x02u_init_mcu(mdev);
err = mt76u_init(mdev, intf);
if (err < 0)
goto err;
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
err = mt76x2u_register_device(dev);
if (err < 0)
......
......@@ -134,30 +134,6 @@ static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
return 0;
}
struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
{
static const struct mt76_driver_ops drv_ops = {
.tx_prepare_skb = mt76x02u_tx_prepare_skb,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops);
if (!mdev)
return NULL;
dev = container_of(mdev, struct mt76x02_dev, mt76);
mdev->dev = pdev;
mdev->drv = &drv_ops;
return dev;
}
int mt76x2u_init_hardware(struct mt76x02_dev *dev)
{
int i, k, err;
......@@ -207,11 +183,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
mt76x02_mac_shared_key_setup(dev, i, k, NULL);
}
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX);
mt76x02_init_beacon_config(dev);
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
......@@ -256,7 +228,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
goto fail;
/* check hw sg support in order to enable AMSDU */
if (mt76u_check_sg(&dev->mt76))
if (dev->mt76.usb.sg_en)
hw->max_tx_fragments = MT_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;
......
......@@ -28,7 +28,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
goto out;
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
MT_MAC_WORK_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
out:
......@@ -46,19 +46,6 @@ static void mt76x2u_stop(struct ieee80211_hw *hw)
mutex_unlock(&dev->mt76.mutex);
}
static int mt76x2u_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
unsigned int idx = 8;
if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
mt76x02_mac_setaddr(dev, vif->addr);
mt76x02_vif_init(dev, vif, idx);
return 0;
}
static int
mt76x2u_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
......@@ -125,7 +112,7 @@ const struct ieee80211_ops mt76x2u_ops = {
.tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
.add_interface = mt76x2u_add_interface,
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
.sta_state = mt76_sta_state,
.set_key = mt76x02_set_key,
......
......@@ -205,9 +205,6 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
{
struct sk_buff *skb, *tmp;
if (pktid == MT_PACKET_ID_NO_ACK)
return NULL;
skb_queue_walk_safe(&dev->status_list, skb, tmp) {
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
......@@ -217,7 +214,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
if (cb->pktid == pktid)
return skb;
if (!pktid &&
if (pktid >= 0 &&
!time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
continue;
......
......@@ -22,6 +22,10 @@
#define MT_VEND_REQ_MAX_RETRY 10
#define MT_VEND_REQ_TOUT_MS 300
static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
......@@ -241,6 +245,16 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
return mt76u_req_rd_rp(dev, base, data, n);
}
static bool mt76u_check_sg(struct mt76_dev *dev)
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
(udev->bus->no_sg_constraint ||
udev->speed == USB_SPEED_WIRELESS));
}
static int
mt76u_set_endpoints(struct usb_interface *intf,
struct mt76_usb *usb)
......@@ -309,7 +323,8 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
return i ? : -ENOMEM;
}
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
static int
mt76u_buf_alloc_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
int nsgs, int len, int sglen, gfp_t gfp)
{
buf->urb = usb_alloc_urb(0, gfp);
......@@ -326,15 +341,42 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}
EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
int len, int data_len, gfp_t gfp)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
buf->urb = usb_alloc_urb(0, gfp);
if (!buf->urb)
return -ENOMEM;
buf->buf = page_frag_alloc(&q->rx_page, len, gfp);
if (!buf->buf)
return -ENOMEM;
buf->len = data_len;
buf->dev = dev;
return 0;
}
void mt76u_buf_free(struct mt76u_buf *buf)
{
struct urb *urb = buf->urb;
struct scatterlist *sg;
int i;
for (i = 0; i < urb->num_sgs; i++)
skb_free_frag(sg_virt(&urb->sg[i]));
for (i = 0; i < urb->num_sgs; i++) {
sg = &urb->sg[i];
if (!sg)
continue;
skb_free_frag(sg_virt(sg));
}
if (buf->buf)
skb_free_frag(buf->buf);
usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);
......@@ -345,6 +387,7 @@ int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
unsigned int pipe;
if (dir == USB_DIR_IN)
......@@ -352,7 +395,7 @@ int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
else
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
complete_fn, context);
trace_submit_urb(dev, buf->urb);
......@@ -393,10 +436,11 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
}
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
u8 *data = sg_virt(&urb->sg[0]);
struct urb *urb = buf->urb;
u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
int data_len, len, nsgs = 1;
struct sk_buff *skb;
......@@ -407,7 +451,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
if (len < 0)
return 0;
data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
return 0;
......@@ -419,7 +464,7 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
__skb_put(skb, data_len);
len -= data_len;
while (len > 0) {
while (len > 0 && urb->num_sgs) {
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
sg_page(&urb->sg[nsgs]),
......@@ -447,7 +492,8 @@ static void mt76u_complete_rx(struct urb *urb)
case -ENOENT:
return;
default:
dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
urb->status);
/* fall through */
case 0:
break;
......@@ -464,12 +510,26 @@ static void mt76u_complete_rx(struct urb *urb)
spin_unlock_irqrestore(&q->lock, flags);
}
static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76u_buf *buf, int nsgs)
{
if (dev->usb.sg_en) {
return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
SKB_WITH_OVERHEAD(q->buf_size));
} else {
buf->buf = page_frag_alloc(&q->rx_page, q->buf_size,
GFP_ATOMIC);
return buf->buf ? 0 : -ENOMEM;
}
}
static void mt76u_rx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int err, nsgs, buf_len = q->buf_size;
struct mt76u_buf *buf;
int err, count;
rcu_read_lock();
......@@ -478,11 +538,9 @@ static void mt76u_rx_tasklet(unsigned long data)
if (!buf)
break;
nsgs = mt76u_process_rx_entry(dev, buf->urb);
if (nsgs > 0) {
err = mt76u_fill_rx_sg(dev, buf, nsgs,
buf_len,
SKB_WITH_OVERHEAD(buf_len));
count = mt76u_process_rx_entry(dev, buf);
if (count > 0) {
err = mt76u_refill_rx(dev, q, buf, count);
if (err < 0)
break;
}
......@@ -520,7 +578,7 @@ EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i, err, nsgs;
int i, err;
spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock);
......@@ -530,23 +588,22 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
if (!q->entry)
return -ENOMEM;
if (mt76u_check_sg(dev)) {
q->buf_size = MT_RX_BUF_SIZE;
nsgs = MT_SG_MAX_SIZE;
} else {
q->buf_size = PAGE_SIZE;
nsgs = 1;
}
for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
q->ndesc = MT_NUM_RX_ENTRIES;
for (i = 0; i < q->ndesc; i++) {
if (dev->usb.sg_en)
err = mt76u_buf_alloc_sg(dev, &q->entry[i].ubuf,
MT_SG_MAX_SIZE, q->buf_size,
SKB_WITH_OVERHEAD(q->buf_size),
GFP_KERNEL);
else
err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
nsgs, q->buf_size,
q->buf_size,
SKB_WITH_OVERHEAD(q->buf_size),
GFP_KERNEL);
if (err < 0)
return err;
}
q->ndesc = MT_NUM_RX_ENTRIES;
return mt76u_submit_rx_buffers(dev);
}
......@@ -691,7 +748,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
{
struct usb_interface *intf = to_usb_interface(dev->dev);
struct usb_device *udev = interface_to_usbdev(intf);
u8 ep = q2ep(q->hw_idx);
u8 *data = NULL, ep = q2ep(q->hw_idx);
struct mt76u_buf *buf;
u16 idx = q->tail;
unsigned int pipe;
......@@ -708,12 +765,16 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
buf = &q->entry[idx].ubuf;
buf->done = false;
if (dev->usb.sg_en) {
err = mt76u_tx_build_sg(skb, buf->urb);
if (err < 0)
return err;
} else {
data = skb->data;
}
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
usb_fill_bulk_urb(buf->urb, udev, pipe, data, skb->len,
mt76u_complete_tx, buf);
q->tail = (q->tail + 1) % q->ndesc;
......@@ -749,10 +810,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
{
struct mt76u_buf *buf;
struct mt76_queue *q;
size_t size;
int i, j;
size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = &dev->q_tx[i];
spin_lock_init(&q->lock);
......@@ -774,11 +833,17 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
if (!buf->urb)
return -ENOMEM;
buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (dev->usb.sg_en) {
size_t size = MT_SG_MAX_SIZE *
sizeof(struct scatterlist);
buf->urb->sg = devm_kzalloc(dev->dev, size,
GFP_KERNEL);
if (!buf->urb->sg)
return -ENOMEM;
}
}
}
return 0;
}
......@@ -838,16 +903,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev)
err = mt76u_alloc_rx(dev);
if (err < 0)
goto err;
err = mt76u_alloc_tx(dev);
if (err < 0)
goto err;
return 0;
err:
mt76u_queues_deinit(dev);
return err;
return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
......@@ -882,6 +940,8 @@ int mt76u_init(struct mt76_dev *dev,
dev->bus = &mt76u_ops;
dev->queue_ops = &usb_queue_ops;
usb->sg_en = mt76u_check_sg(dev);
return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);
......
......@@ -29,9 +29,8 @@ int mt76u_mcu_init_rx(struct mt76_dev *dev)
struct mt76_usb *usb = &dev->usb;
int err;
err = mt76u_buf_alloc(dev, &usb->mcu.res, 1,
MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE,
GFP_KERNEL);
err = mt76u_buf_alloc(dev, &usb->mcu.res, MCU_RESP_URB_SIZE,
MCU_RESP_URB_SIZE, GFP_KERNEL);
if (err < 0)
return err;
......@@ -48,9 +47,11 @@ EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx);
void mt76u_mcu_deinit(struct mt76_dev *dev)
{
struct mt76_usb *usb = &dev->usb;
struct mt76u_buf *buf = &dev->usb.mcu.res;
usb_kill_urb(usb->mcu.res.urb);
mt76u_buf_free(&usb->mcu.res);
if (buf->urb) {
usb_kill_urb(buf->urb);
mt76u_buf_free(buf);
}
}
EXPORT_SYMBOL_GPL(mt76u_mcu_deinit);