Commit 7009deab authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next

Conflicts:
	drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
	drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
	drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
	drivers/net/wireless/rtlwifi/rtl8192de/phy.h
	drivers/net/wireless/rtlwifi/rtl8723ae/phy.h

Just some minor conflicts between the wireless-next changes
and Joe Perches's "extern" removal from function prototypes
in header files.

John W. Linville says:

====================
Regarding the Bluetooth bits, Gustavo says:

"The big work here is from Marcel and Johan. They did a lot of work
in the L2CAP, HCI and MGMT layers. The most important ones are the
addition of a new MGMT command to enable/disable LE advertisement
and the introduction of the HCI user channel to allow applications
to get direct and exclusive access to Bluetooth devices."

As to the ath10k bits, Kalle says:

"Bartosz dropped support for qca98xx hw1.0 hardware from ath10k, it's
just too much work to support it. Michal added support for the new firmware
interface. Marek fixed WEP in AP and IBSS mode. The rest of the changes
are minor fixes or cleanups."

And also:

"Major changes are:

* throughput improvements including aligning the RX frames correctly and
  optimising the HTT layer (Michal)

* remove qca98xx hw1.0 support (Bartosz)

* add support for firmware version 999.999.0.636 (Michal)

* firmware htt statistics support (Kalle)

* fix WEP in AP and IBSS mode (Marek)

* fix a mutex unlock balance in debugfs file (Shafi)

And of course there are a lot of smaller fixes and cleanups."

For the wl12xx bits, Luca says:

"Here are some patches intended for 3.13.  Eliad is upstreaming a bunch
of patches that have been pending in the internal tree.  Mostly bugfixes
and other small improvements."

Along with that...

Arend and friends bring us a batch of brcmfmac updates, Larry Finger
offers some rtlwifi refactoring, and Sujith sends the usual batch of
ath9k updates.  As usual, there are a number of other small updates
from a variety of players as well.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5cde2829 274dede8
......@@ -30,3 +30,5 @@ hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
......@@ -23,6 +23,8 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
#define BTM_HEADER_LEN 4
#define BTM_UPLD_SIZE 2312
......@@ -41,6 +43,8 @@ struct btmrvl_thread {
struct btmrvl_device {
void *card;
struct hci_dev *hcidev;
struct device *dev;
const char *cal_data;
u8 dev_type;
......@@ -91,6 +95,7 @@ struct btmrvl_private {
#define BT_CMD_HOST_SLEEP_CONFIG 0x59
#define BT_CMD_HOST_SLEEP_ENABLE 0x5A
#define BT_CMD_MODULE_CFG_REQ 0x5B
#define BT_CMD_LOAD_CONFIG_DATA 0x61
/* Sub-commands: Module Bringup/Shutdown Request/Response */
#define MODULE_BRINGUP_REQ 0xF1
......@@ -116,11 +121,8 @@ struct btmrvl_private {
#define PS_SLEEP 0x01
#define PS_AWAKE 0x00
struct btmrvl_cmd {
__le16 ocf_ogf;
u8 length;
u8 data[4];
} __packed;
#define BT_CMD_DATA_SIZE 32
#define BT_CAL_DATA_SIZE 28
struct btmrvl_event {
u8 ec; /* event counter */
......
......@@ -57,8 +57,7 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
ocf = hci_opcode_ocf(opcode);
ogf = hci_opcode_ogf(opcode);
if (ocf == BT_CMD_MODULE_CFG_REQ &&
priv->btmrvl_dev.sendcmdflag) {
if (priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
......@@ -116,7 +115,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
adapter->hs_state = HS_ACTIVATED;
if (adapter->psmode)
adapter->ps_state = PS_SLEEP;
wake_up_interruptible(&adapter->cmd_wait_q);
BT_DBG("HS ACTIVATED!");
} else {
BT_DBG("HS Enable failed");
......@@ -168,22 +166,24 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(btmrvl_process_event);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
const void *param, u8 len)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
int ret = 0;
struct hci_command_hdr *hdr;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ));
cmd->length = 1;
cmd->data[0] = subcmd;
hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(hci_opcode_pack(OGF, cmd_no));
hdr->plen = len;
if (len)
memcpy(skb_put(skb, len), param, len);
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
......@@ -194,19 +194,23 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
priv->adapter->cmd_complete = false;
BT_DBG("Queue module cfg Command");
wake_up_interruptible(&priv->main_thread.wait_q);
if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
priv->adapter->cmd_complete,
msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) {
ret = -ETIMEDOUT;
BT_ERR("module_cfg_cmd(%x): timeout: %d",
subcmd, priv->btmrvl_dev.sendcmdflag);
}
msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
return -ETIMEDOUT;
BT_DBG("module cfg Command done");
return 0;
}
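
For reference, the opcode packing used by btmrvl_send_sync_cmd() above follows the
hci_opcode_pack() rule, opcode = (ocf & 0x03ff) | (ogf << 10), and the result is stored
little-endian in the HCI command header. A stand-alone sketch, assuming the driver's
vendor OGF value is 0x3f (that define is not part of this hunk):

#include <stdint.h>
#include <stdio.h>

/* Same packing rule as the kernel's hci_opcode_pack() macro. */
static uint16_t opcode_pack(uint16_t ogf, uint16_t ocf)
{
	return (ocf & 0x03ff) | (ogf << 10);
}

int main(void)
{
	/* BT_CMD_MODULE_CFG_REQ (OCF 0x5b) under OGF 0x3f packs to 0xfc5b. */
	printf("opcode 0x%04x\n", opcode_pack(0x3f, 0x5b));
	return 0;
}
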
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
{
int ret;
ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1);
if (ret)
BT_ERR("module_cfg_cmd(%x) failed\n", subcmd);
return ret;
}
......@@ -214,61 +218,36 @@ EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
if (!skb) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
BT_CMD_HOST_SLEEP_CONFIG));
cmd->length = 2;
cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
int ret;
u8 param[2];
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x",
param[0], param[1]);
BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
cmd->data[1]);
ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2);
if (ret)
BT_ERR("HSCFG command failed\n");
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
int btmrvl_enable_ps(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
BT_CMD_AUTO_SLEEP_MODE));
cmd->length = 1;
int ret;
u8 param;
if (priv->btmrvl_dev.psmode)
cmd->data[0] = BT_PS_ENABLE;
param = BT_PS_ENABLE;
else
cmd->data[0] = BT_PS_DISABLE;
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
param = BT_PS_DISABLE;
skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1);
if (ret)
BT_ERR("PSMODE command failed\n");
return 0;
}
......@@ -276,37 +255,11 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
int btmrvl_enable_hs(struct btmrvl_private *priv)
{
struct sk_buff *skb;
struct btmrvl_cmd *cmd;
int ret = 0;
skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
return -ENOMEM;
}
cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE));
cmd->length = 0;
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
skb->dev = (void *) priv->btmrvl_dev.hcidev;
skb_queue_head(&priv->adapter->tx_queue, skb);
BT_DBG("Queue hs enable Command");
wake_up_interruptible(&priv->main_thread.wait_q);
int ret;
if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
priv->adapter->hs_state,
msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) {
ret = -ETIMEDOUT;
BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state,
priv->adapter->ps_state,
priv->adapter->wakeup_tries);
}
ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
if (ret)
BT_ERR("Host sleep enable command failed\n");
return ret;
}
......@@ -479,6 +432,137 @@ static int btmrvl_open(struct hci_dev *hdev)
return 0;
}
/*
* This function parses the provided calibration data input, which should
* contain hex bytes separated by spaces or newline characters. Here is an
* example:
* 00 1C 01 37 FF FF FF FF 02 04 7F 01
* CE BA 00 00 00 2D C6 C0 00 00 00 00
* 00 F0 00 00
*/
static int btmrvl_parse_cal_cfg(const u8 *src, u32 len, u8 *dst, u32 dst_size)
{
const u8 *s = src;
u8 *d = dst;
int ret;
u8 tmp[3];
tmp[2] = '\0';
while ((s - src) <= len - 2) {
if (isspace(*s)) {
s++;
continue;
}
if (isxdigit(*s)) {
if ((d - dst) >= dst_size) {
BT_ERR("calibration data file too big!!!");
return -EINVAL;
}
memcpy(tmp, s, 2);
ret = kstrtou8(tmp, 16, d++);
if (ret < 0)
return ret;
s += 2;
} else {
return -EINVAL;
}
}
if (d == dst)
return -EINVAL;
return 0;
}
static int btmrvl_load_cal_data(struct btmrvl_private *priv,
u8 *config_data)
{
int i, ret;
u8 data[BT_CMD_DATA_SIZE];
data[0] = 0x00;
data[1] = 0x00;
data[2] = 0x00;
data[3] = BT_CMD_DATA_SIZE - 4;
/* Swap cal-data bytes. Each four bytes are swapped. Considering 4
* byte SDIO header offset, mapping of input and output bytes will be
* {3, 2, 1, 0} -> {0+4, 1+4, 2+4, 3+4},
* {7, 6, 5, 4} -> {4+4, 5+4, 6+4, 7+4} */
for (i = 4; i < BT_CMD_DATA_SIZE; i++)
data[i] = config_data[(i / 4) * 8 - 1 - i];
print_hex_dump_bytes("Calibration data: ",
DUMP_PREFIX_OFFSET, data, BT_CMD_DATA_SIZE);
ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
BT_CMD_DATA_SIZE);
if (ret)
BT_ERR("Failed to download calibration data\n");
return 0;
}
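
To make the index arithmetic in btmrvl_load_cal_data() easier to follow, here is a
stand-alone user-space sketch (illustrative only) that prints which calibration input
byte lands in which output slot after the 4-byte command header:

#include <stdio.h>

#define BT_CMD_DATA_SIZE 32

int main(void)
{
	int i;

	/* data[i] = config_data[(i / 4) * 8 - 1 - i] reverses each group of
	 * four input bytes, e.g. config_data {3, 2, 1, 0} fill data[4..7]. */
	for (i = 4; i < BT_CMD_DATA_SIZE; i++)
		printf("data[%2d] <- config_data[%2d]\n", i, (i / 4) * 8 - 1 - i);

	return 0;
}
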
static int
btmrvl_process_cal_cfg(struct btmrvl_private *priv, u8 *data, u32 size)
{
u8 cal_data[BT_CAL_DATA_SIZE];
int ret;
ret = btmrvl_parse_cal_cfg(data, size, cal_data, sizeof(cal_data));
if (ret)
return ret;
ret = btmrvl_load_cal_data(priv, cal_data);
if (ret) {
BT_ERR("Failed to load calibration data");
return ret;
}
return 0;
}
static int btmrvl_cal_data_config(struct btmrvl_private *priv)
{
const struct firmware *cfg;
int ret;
const char *cal_data = priv->btmrvl_dev.cal_data;
if (!cal_data)
return 0;
ret = request_firmware(&cfg, cal_data, priv->btmrvl_dev.dev);
if (ret < 0) {
BT_DBG("Failed to get %s file, skipping cal data download",
cal_data);
return 0;
}
ret = btmrvl_process_cal_cfg(priv, (u8 *)cfg->data, cfg->size);
release_firmware(cfg);
return ret;
}
static int btmrvl_setup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
if (btmrvl_cal_data_config(priv))
BT_ERR("Set cal data failed");
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
priv->btmrvl_dev.gpio_gap = 0xffff;
btmrvl_send_hscfg_cmd(priv);
return 0;
}
/*
* This function handles the event generated by firmware, rx data
* received from firmware, and tx data sent from kernel.
......@@ -572,8 +656,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->flush = btmrvl_flush;
hdev->send = btmrvl_send_frame;
hdev->ioctl = btmrvl_ioctl;
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
hdev->setup = btmrvl_setup;
hdev->dev_type = priv->btmrvl_dev.dev_type;
......
......@@ -18,7 +18,6 @@
* this warranty disclaimer.
**/
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/mmc/sdio_ids.h>
......@@ -102,6 +101,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
.cal_data = NULL,
.reg = &btmrvl_reg_8688,
.sd_blksz_fw_dl = 64,
};
......@@ -109,6 +109,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
.cal_data = NULL,
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
......@@ -116,6 +117,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.helper = NULL,
.firmware = "mrvl/sd8797_uapsta.bin",
.cal_data = "mrvl/sd8797_caldata.conf",
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
......@@ -123,6 +125,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
.helper = NULL,
.firmware = "mrvl/sd8897_uapsta.bin",
.cal_data = NULL,
.reg = &btmrvl_reg_88xx,
.sd_blksz_fw_dl = 256,
};
......@@ -1006,6 +1009,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
struct btmrvl_sdio_device *data = (void *) id->driver_data;
card->helper = data->helper;
card->firmware = data->firmware;
card->cal_data = data->cal_data;
card->reg = data->reg;
card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
}
......@@ -1034,6 +1038,8 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
}
card->priv = priv;
priv->btmrvl_dev.dev = &card->func->dev;
priv->btmrvl_dev.cal_data = card->cal_data;
/* Initialize the interface specific function pointers */
priv->hw_host_to_card = btmrvl_sdio_host_to_card;
......@@ -1046,12 +1052,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
goto disable_host_int;
}
priv->btmrvl_dev.psmode = 1;
btmrvl_enable_ps(priv);
priv->btmrvl_dev.gpio_gap = 0xffff;
btmrvl_send_hscfg_cmd(priv);
return 0;
disable_host_int:
......@@ -1222,4 +1222,5 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_caldata.conf");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
......@@ -85,6 +85,7 @@ struct btmrvl_sdio_card {
u32 ioport;
const char *helper;
const char *firmware;
const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
u8 rx_unit;
......@@ -94,6 +95,7 @@ struct btmrvl_sdio_card {
struct btmrvl_sdio_device {
const char *helper;
const char *firmware;
const char *cal_data;
const struct btmrvl_sdio_card_reg *reg;
u16 sd_blksz_fw_dl;
};
......
......@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
......@@ -39,17 +40,17 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define VERSION "1.3"
#define VERSION "1.4"
static bool amp;
struct vhci_data {
struct hci_dev *hdev;
unsigned long flags;
wait_queue_head_t read_wait;
struct sk_buff_head readq;
struct delayed_work open_timeout;
};
static int vhci_open_dev(struct hci_dev *hdev)
......@@ -99,16 +100,62 @@ static int vhci_send_frame(struct sk_buff *skb)
skb_queue_tail(&data->readq, skb);
wake_up_interruptible(&data->read_wait);
return 0;
}
static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
{
struct hci_dev *hdev;
struct sk_buff *skb;
skb = bt_skb_alloc(4, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdev = hci_alloc_dev();
if (!hdev) {
kfree_skb(skb);
return -ENOMEM;
}
data->hdev = hdev;
hdev->bus = HCI_VIRTUAL;
hdev->dev_type = dev_type;
hci_set_drvdata(hdev, data);
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
data->hdev = NULL;
kfree_skb(skb);
return -EBUSY;
}
bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
*skb_put(skb, 1) = 0xff;
*skb_put(skb, 1) = dev_type;
put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
wake_up_interruptible(&data->read_wait);
return 0;
}
static inline ssize_t vhci_get_user(struct vhci_data *data,
const char __user *buf, size_t count)
const char __user *buf, size_t count)
{
struct sk_buff *skb;
__u8 pkt_type, dev_type;
int ret;
if (count > HCI_MAX_FRAME_SIZE)
if (count < 2 || count > HCI_MAX_FRAME_SIZE)
return -EINVAL;
skb = bt_skb_alloc(count, GFP_KERNEL);
......@@ -120,27 +167,70 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
return -EFAULT;
}
skb->dev = (void *) data->hdev;
bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
pkt_type = *((__u8 *) skb->data);
skb_pull(skb, 1);
hci_recv_frame(skb);
switch (pkt_type) {
case HCI_EVENT_PKT:
case HCI_ACLDATA_PKT:
case HCI_SCODATA_PKT:
if (!data->hdev) {
kfree_skb(skb);
return -ENODEV;
}
skb->dev = (void *) data->hdev;
bt_cb(skb)->pkt_type = pkt_type;
ret = hci_recv_frame(skb);
break;
case HCI_VENDOR_PKT:
if (data->hdev) {
kfree_skb(skb);
return -EBADFD;
}
return count;
cancel_delayed_work_sync(&data->open_timeout);
dev_type = *((__u8 *) skb->data);
skb_pull(skb, 1);
if (skb->len > 0) {
kfree_skb(skb);
return -EINVAL;
}
kfree_skb(skb);
if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
return -EINVAL;
ret = vhci_create_device(data, dev_type);
break;
default:
kfree_skb(skb);
return -EINVAL;
}
return (ret < 0) ? ret : count;
}
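
A rough user-space sketch of the handshake handled above: before the one-second open
timeout fires, the application writes a vendor packet carrying the desired dev_type and
then reads back the index the kernel assigned. The /dev/vhci path and the 0xff/0x00
values correspond to HCI_VENDOR_PKT and HCI_BREDR; this is illustrative only, not part
of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* [HCI_VENDOR_PKT, HCI_BREDR]: request a BR/EDR controller. */
	unsigned char req[2] = { 0xff, 0x00 };
	unsigned char resp[4];
	int fd = open("/dev/vhci", O_RDWR);

	if (fd < 0 || write(fd, req, sizeof(req)) != sizeof(req))
		return 1;

	/* Kernel replies with [0xff, dev_type, index as le16]. */
	if (read(fd, resp, sizeof(resp)) == sizeof(resp))
		printf("created hci%u\n", resp[2] | (resp[3] << 8));

	close(fd);
	return 0;
}
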
static inline ssize_t vhci_put_user(struct vhci_data *data,
struct sk_buff *skb, char __user *buf, int count)
struct sk_buff *skb,
char __user *buf, int count)
{
char __user *ptr = buf;
int len, total = 0;
int len;
len = min_t(unsigned int, skb->len, count);
if (copy_to_user(ptr, skb->data, len))
return -EFAULT;
total += len;
if (!data->hdev)
return len;
data->hdev->stat.byte_tx += len;
......@@ -148,21 +238,19 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
case HCI_COMMAND_PKT:
data->hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
data->hdev->stat.acl_tx++;
break;
case HCI_SCODATA_PKT:
data->hdev->stat.sco_tx++;
break;
}
return total;
return len;
}
static ssize_t vhci_read(struct file *file,
char __user *buf, size_t count, loff_t *pos)
char __user *buf, size_t count, loff_t *pos)
{
struct vhci_data *data = file->private_data;
struct sk_buff *skb;
......@@ -185,7 +273,7 @@ static ssize_t vhci_read(struct file *file,
}
ret = wait_event_interruptible(data->read_wait,
!skb_queue_empty(&data->readq));
!skb_queue_empty(&data->readq));
if (ret < 0)
break;
}
......@@ -194,7 +282,7 @@ static ssize_t vhci_read(struct file *file,
}
static ssize_t vhci_write(struct file *file,
const char __user *buf, size_t count, loff_t *pos)
const char __user *buf, size_t count, loff_t *pos)
{
struct vhci_data *data = file->private_data;
......@@ -213,10 +301,17 @@ static unsigned int vhci_poll(struct file *file, poll_table *wait)
return POLLOUT | POLLWRNORM;
}
static void vhci_open_timeout(struct work_struct *work)
{
struct vhci_data *data = container_of(work, struct vhci_data,
open_timeout.work);
vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR);
}
static int vhci_open(struct inode *inode, struct file *file)
{
struct vhci_data *data;
struct hci_dev *hdev;
data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
if (!data)
......@@ -225,35 +320,13 @@ static int vhci_open(struct inode *inode, struct file *file)
skb_queue_head_init(&data->readq);
init_waitqueue_head(&data->read_wait);
hdev = hci_alloc_dev();
if (!hdev) {
kfree(data);
return -ENOMEM;
}
data->hdev = hdev;
hdev->bus = HCI_VIRTUAL;
hci_set_drvdata(hdev, data);
if (amp)
hdev->dev_type = HCI_AMP;
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
kfree(data);
hci_free_dev(hdev);
return -EBUSY;
}
INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
file->private_data = data;
nonseekable_open(inode, file);
schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
return 0;
}
......@@ -262,8 +335,12 @@ static int vhci_release(struct inode *inode, struct file *file)
struct vhci_data *data = file->private_data;
struct hci_dev *hdev = data->hdev;
hci_unregister_dev(hdev);
hci_free_dev(hdev);
cancel_delayed_work_sync(&data->open_timeout);
if (hdev) {
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
file->private_data = NULL;
kfree(data);
......@@ -309,3 +386,4 @@ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("devname:vhci");
......@@ -1924,7 +1924,6 @@ static int adm8211_probe(struct pci_dev *pdev,
pci_iounmap(pdev, priv->map);
err_free_dev:
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
......
......@@ -5570,7 +5570,6 @@ static void airo_pci_remove(struct pci_dev *pdev)
airo_print_info(dev->name, "Unregistering...");
stop_airo_card(dev, 1);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
......
......@@ -1762,6 +1762,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
......
......@@ -22,7 +22,8 @@
void ath10k_bmi_start(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
}
......@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
if (ar->bmi.done_sent) {
ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
......@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
return ret;
}
ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
return 0;
}
......@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
u32 resplen = sizeof(resp.get_target_info);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
if (ar->bmi.done_sent) {
ath10k_warn("BMI Get Target Info Command disallowed\n");
return -EBUSY;
......@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
target_info->version = __le32_to_cpu(resp.get_target_info.version);
target_info->type = __le32_to_cpu(resp.get_target_info.type);
return 0;
}
......@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
u32 rxlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, length: %d)\n",
__func__, ar, address, length);
while (length) {
rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
......@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
u32 txlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, length: %d)\n",
__func__, ar, address, length);
while (length) {
txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
......@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
u32 resplen = sizeof(resp.execute);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
address, *param);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
}
ath10k_dbg(ATH10K_DBG_CORE,
"%s: (device: 0x%p, address: 0x%x, param: %d)\n",
__func__, ar, address, *param);
cmd.id = __cpu_to_le32(BMI_EXECUTE);
cmd.execute.addr = __cpu_to_le32(address);
cmd.execute.param = __cpu_to_le32(*param);
......@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
......@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
address);
if (ar->bmi.done_sent) {
ath10k_warn("command disallowed\n");
return -EBUSY;
......@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
u32 trailer_len = length - head_len;
int ret;
ath10k_dbg(ATH10K_DBG_BMI,
"bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
if (ret)
return ret;
......
......@@ -27,7 +27,6 @@
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
#define CE_SENDLIST_ITEMS_MAX 12
#define CE_SEND_FLAG_GATHER 0x00010000
/*
......@@ -36,16 +35,9 @@
* how to use copy engines.
*/
struct ce_state;
struct ath10k_ce_pipe;
/* Copy Engine operational state */
enum ce_op_state {
CE_UNUSED,
CE_PAUSED,
CE_RUNNING,
};
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
......@@ -57,8 +49,7 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
/* Copy Engine Ring internal state */
struct ce_ring_state {
struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
unsigned int nentries_mask;
......@@ -116,49 +107,20 @@ struct ce_ring_state {
void **per_transfer_context;
};
/* Copy Engine internal state */
struct ce_state {
struct ath10k_ce_pipe {
struct ath10k *ar;
unsigned int id;
unsigned int attr_flags;
u32 ctrl_addr;
enum ce_op_state state;
void (*send_cb) (struct ce_state *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id);
void (*recv_cb) (struct ce_state *ce_state,
void *per_transfer_recv_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
unsigned int src_sz_max;
struct ce_ring_state *src_ring;
struct ce_ring_state *dest_ring;
};
void (*send_cb)(struct ath10k_ce_pipe *);
void (*recv_cb)(struct ath10k_ce_pipe *);
struct ce_sendlist_item {
/* e.g. buffer or desc list */
dma_addr_t data;
union {
/* simple buffer */
unsigned int nbytes;
/* Rx descriptor list */
unsigned int ndesc;
} u;
/* externally-specified flags; OR-ed with internal flags */
u32 flags;
};
struct ce_sendlist {
unsigned int num_items;
struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
unsigned int src_sz_max;
struct ath10k_ce_ring *src_ring;
struct ath10k_ce_ring *dest_ring;
};
/* Copy Engine settable attributes */
......@@ -182,7 +144,7 @@ struct ce_attr;
*
* Implementation note: pushes 1 buffer to Source ring
*/
int ath10k_ce_send(struct ce_state *ce_state,
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
unsigned int nbytes,
......@@ -190,21 +152,10 @@ int ath10k_ce_send(struct ce_state *ce_state,
unsigned int transfer_id,
unsigned int flags);
void ath10k_ce_send_cb_register(struct ce_state *ce_state,
void (*send_cb) (struct ce_state *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id),
void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
/* Append a simple buffer (address/length) to a sendlist. */
void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
u32 buffer,
unsigned int nbytes,
/* OR-ed with internal flags */
u32 flags);
/*
* Queue a "sendlist" of buffers to be sent using gather to a single
* anonymous destination buffer
......@@ -215,11 +166,11 @@ void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
*
* Implementation note: Pushes multiple buffers with Gather to Source ring.
*/
int ath10k_ce_sendlist_send(struct ce_state *ce_state,
void *per_transfer_send_context,
struct ce_sendlist *sendlist,
/* 14 bits */
unsigned int transfer_id);
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
unsigned int transfer_id,
u32 paddr, unsigned int nbytes,
u32 flags);
/*==================Recv=======================*/
......@@ -233,17 +184,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
*
* Implementation note: Pushes a buffer to Dest ring.
*/
int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
void *per_transfer_recv_context,
u32 buffer);
void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
void (*recv_cb) (struct ce_state *ce_state,
void *transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags));
void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
void (*recv_cb)(struct ath10k_ce_pipe *));
/* recv flags */
/* Data is byte-swapped */
......@@ -253,7 +199,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
* Supply data for the next completed unprocessed receive descriptor.
* Pops buffer from Dest ring.
*/
int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
......@@ -263,7 +209,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
*/
int ath10k_ce_completed_send_next(struct ce_state *ce_state,
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
......@@ -272,7 +218,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
/*==================CE Engine Initialization=======================*/
/* Initialize an instance of a CE */
struct ce_state *ath10k_ce_init(struct ath10k *ar,
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr);
......@@ -282,7 +228,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
* receive buffers. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
......@@ -291,13 +237,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
* pending sends. Target DMA must be stopped before using
* this API.
*/
int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
void ath10k_ce_deinit(struct ce_state *ce_state);
void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
......@@ -322,9 +268,6 @@ struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
/* currently not in use */
unsigned int priority;
/* #entries in source ring - Must be a power of 2 */
unsigned int src_nentries;
......@@ -336,21 +279,8 @@ struct ce_attr {
/* #entries in destination ring - Must be a power of 2 */
unsigned int dest_nentries;
/* Future use */
void *reserved;
};
/*
* When using sendlist_send to transfer multiple buffer fragments, the
* transfer context of each fragment, except last one, will be filled
* with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
* each fragment done with send and the transfer context would be
* CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
* status of a send completion.
*/
#define CE_SENDLIST_ITEM_CTXT ((void *)0xcecebeef)
#define SR_BA_ADDRESS 0x0000
#define SR_SIZE_ADDRESS 0x0004
#define DR_BA_ADDRESS 0x0008
......
......@@ -38,17 +38,6 @@ MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
.id = QCA988X_HW_1_0_VERSION,
.name = "qca988x hw1.0",
.patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
.fw = {
.dir = QCA988X_HW_1_0_FW_DIR,
.fw = QCA988X_HW_1_0_FW_FILE,
.otp = QCA988X_HW_1_0_OTP_FILE,
.board = QCA988X_HW_1_0_BOARD_DATA_FILE,
},
},
{
.id = QCA988X_HW_2_0_VERSION,
.name = "qca988x hw2.0",
......@@ -64,7 +53,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
ar->is_target_paused = true;
wake_up(&ar->event_queue);
......@@ -112,7 +101,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto timeout;
}
ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
return 0;
timeout:
......@@ -214,8 +203,8 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
return ret;
}
ath10k_dbg(ATH10K_DBG_CORE,
"ath10k: Board extended Data download addr: 0x%x\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot push board extended data addr 0x%x\n",
board_ext_data_addr);
if (board_ext_data_addr == 0)
......@@ -446,6 +435,13 @@ static int ath10k_init_uart(struct ath10k *ar)
return ret;
}
/* Set the UART baud rate to 19200. */
ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
if (ret) {
ath10k_warn("could not set the baud rate (%d)\n", ret);
return ret;
}
ath10k_info("UART prints enabled\n");
return 0;
}
......@@ -641,6 +637,10 @@ int ath10k_core_start(struct ath10k *ar)
if (status)
goto err_disconnect_htc;
status = ath10k_debug_start(ar);
if (status)
goto err_disconnect_htc;
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
return 0;
......@@ -658,6 +658,7 @@ EXPORT_SYMBOL(ath10k_core_start);
void ath10k_core_stop(struct ath10k *ar)
{
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
......@@ -717,10 +718,46 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return 0;
}
int ath10k_core_register(struct ath10k *ar)
static int ath10k_core_check_chip_id(struct ath10k *ar)
{
u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
ar->chip_id, hw_revision);
/* Check that we are not using hw1.0 (some of them have same pci id
* as hw2.0) before doing anything else as ath10k crashes horribly
* due to missing hw1.0 workarounds. */
switch (hw_revision) {
case QCA988X_HW_1_0_CHIP_ID_REV:
ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
return -EOPNOTSUPP;
case QCA988X_HW_2_0_CHIP_ID_REV:
/* known hardware revision, continue normally */
return 0;
default:
ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
ar->chip_id);
return 0;
}
return 0;
}
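
The MS() helper used in ath10k_core_check_chip_id() is a plain mask-and-shift; with the
SOC_CHIP_ID_REV mask (0x00000f00) and LSB (8) added to hw.h further down in this diff,
the check reduces to the following stand-alone sketch (the chip_id value is made up for
illustration):

#include <stdint.h>
#include <stdio.h>

#define SOC_CHIP_ID_REV_LSB	8
#define SOC_CHIP_ID_REV_MASK	0x00000f00

int main(void)
{
	uint32_t chip_id = 0x043202ff;	/* example register value, not real */
	uint32_t rev = (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;

	/* rev == 0x2 (QCA988X_HW_2_0_CHIP_ID_REV) is accepted,
	 * rev == 0x0 (hw1.0) is rejected with -EOPNOTSUPP. */
	printf("hw_revision 0x%x\n", rev);
	return 0;
}
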
int ath10k_core_register(struct ath10k *ar, u32 chip_id)
{
int status;
ar->chip_id = chip_id;
status = ath10k_core_check_chip_id(ar);
if (status) {
ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
return status;
}
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err("could not probe fw (%d)\n", status);
......@@ -755,6 +792,7 @@ void ath10k_core_unregister(struct ath10k *ar)
* Otherwise we will fail to submit commands to FW and mac80211 will be
* unhappy about callback failures. */
ath10k_mac_unregister(ar);
ath10k_core_free_firmware_files(ar);
}
EXPORT_SYMBOL(ath10k_core_unregister);
......
......@@ -52,18 +52,12 @@ struct ath10k_skb_cb {
struct {
u8 vdev_id;
u16 msdu_id;
u8 tid;
bool is_offchan;
bool is_conf;
bool discard;
bool no_ack;
u8 refcount;
struct sk_buff *txfrag;
struct sk_buff *msdu;
} __packed htt;
/* 4 bytes left on 64bit arch */
u8 frag_len;
u8 pad_len;
} __packed htt;
} __packed;
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
......@@ -112,11 +106,7 @@ struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
atomic_t pending_tx_count;
wait_queue_head_t wq;
struct sk_buff_head wmi_event_list;
struct work_struct wmi_event_work;
wait_queue_head_t tx_credits_wq;
};
struct ath10k_peer_stat {
......@@ -203,6 +193,7 @@ struct ath10k_vif {
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
u32 dtim_period;
struct sk_buff *beacon;
struct ath10k *ar;
struct ieee80211_vif *vif;
......@@ -246,6 +237,9 @@ struct ath10k_debug {
u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
struct completion event_stats_compl;
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
};
enum ath10k_state {
......@@ -270,12 +264,21 @@ enum ath10k_state {
ATH10K_STATE_WEDGED,
};
enum ath10k_fw_features {
/* wmi_mgmt_rx_hdr contains extra RSSI information */
ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
struct device *dev;
u8 mac_addr[ETH_ALEN];
u32 chip_id;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
......@@ -288,6 +291,8 @@ struct ath10k {
u32 vht_cap_info;
u32 num_rf_chains;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
struct targetdef *targetdef;
struct hostdef *hostdef;
......@@ -393,7 +398,7 @@ void ath10k_core_destroy(struct ath10k *ar);
int ath10k_core_start(struct ath10k *ar);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */
......@@ -21,6 +21,9 @@
#include "core.h"
#include "debug.h"
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
static int ath10k_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
......@@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
......@@ -499,6 +501,136 @@ static const struct file_operations fops_simulate_fw_crash = {
.llseek = default_llseek,
};
static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned int len;
char buf[50];
len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_chip_id = {
.read = ath10k_read_chip_id,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static int ath10k_debug_htt_stats_req(struct ath10k *ar)
{
u64 cookie;
int ret;
lockdep_assert_held(&ar->conf_mutex);
if (ar->debug.htt_stats_mask == 0)
/* htt stats are disabled */
return 0;
if (ar->state != ATH10K_STATE_ON)
return 0;
cookie = get_jiffies_64();
ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
cookie);
if (ret) {
ath10k_warn("failed to send htt stats request: %d\n", ret);
return ret;
}
queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
return 0;
}
static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k,
debug.htt_stats_dwork.work);
mutex_lock(&ar->conf_mutex);
ath10k_debug_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
}
static ssize_t ath10k_read_htt_stats_mask(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
char buf[32];
unsigned int len;
len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath10k_write_htt_stats_mask(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
unsigned long mask;
int ret;
ret = kstrtoul_from_user(user_buf, count, 0, &mask);
if (ret)
return ret;
/* max 8 bit masks (for now) */
if (mask > 0xff)
return -E2BIG;
mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats_mask = mask;
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
goto out;
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_htt_stats_mask = {
.read = ath10k_read_htt_stats_mask,
.write = ath10k_write_htt_stats_mask,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
/* continue normally anyway, this isn't serious */
ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
return 0;
}
void ath10k_debug_stop(struct ath10k *ar)
{
cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
}
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
......@@ -507,6 +639,9 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.debugfs_phy)
return -ENOMEM;
INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
ath10k_debug_htt_stats_dwork);
init_completion(&ar->debug.event_stats_compl);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
......@@ -518,8 +653,15 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_simulate_fw_crash);
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_chip_id);
debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_htt_stats_mask);
return 0;
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
......
......@@ -27,11 +27,12 @@ enum ath10k_debug_mask {
ATH10K_DBG_HTC = 0x00000004,
ATH10K_DBG_HTT = 0x00000008,
ATH10K_DBG_MAC = 0x00000010,
ATH10K_DBG_CORE = 0x00000020,
ATH10K_DBG_BOOT = 0x00000020,
ATH10K_DBG_PCI_DUMP = 0x00000040,
ATH10K_DBG_HTT_DUMP = 0x00000080,
ATH10K_DBG_MGMT = 0x00000100,
ATH10K_DBG_DATA = 0x00000200,
ATH10K_DBG_BMI = 0x00000400,
ATH10K_DBG_ANY = 0xffffffff,
};
......@@ -42,6 +43,8 @@ __printf(1, 2) int ath10k_err(const char *fmt, ...);
__printf(1, 2) int ath10k_warn(const char *fmt, ...);
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_debug_start(struct ath10k *ar);
void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
......@@ -50,6 +53,15 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_debug_stop(struct ath10k *ar)
{
}
static inline int ath10k_debug_create(struct ath10k *ar)
{
return 0;
......
......@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
memset(hdr, 0, sizeof(*hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
......@@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
spin_unlock_bh(&ep->htc->tx_lock);
}
static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep,
struct sk_buff *skb,
u8 credits)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
int ret;
ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
ath10k_htc_prepare_tx_skb(ep, skb);
ret = ath10k_skb_map(htc->ar->dev, skb);
if (ret)
goto err;
ret = ath10k_hif_send_head(htc->ar,
ep->ul_pipe_id,
ep->eid,
skb->len,
skb);
if (unlikely(ret))
goto err;
return 0;
err:
ath10k_warn("HTC issue failed: %d\n", ret);
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
spin_unlock_bh(&htc->tx_lock);
/* this is the simplest way to handle out-of-resources for non-credit
* based endpoints. credit based endpoints can still get -ENOSR, but
* this is highly unlikely as credit reservation should prevent that */
if (ret == -ENOSR) {
spin_lock_bh(&htc->tx_lock);
__skb_queue_head(&ep->tx_queue, skb);
spin_unlock_bh(&htc->tx_lock);
return ret;
}
skb_cb->is_aborted = true;
ath10k_htc_notify_tx_completion(ep, skb);
return ret;
}
static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep,
u8 *credits)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
int credits_required;
int remainder;
unsigned int transfer_len;
lockdep_assert_held(&htc->tx_lock);
skb = __skb_dequeue(&ep->tx_queue);
if (!skb)
return NULL;
skb_cb = ATH10K_SKB_CB(skb);
transfer_len = skb->len;
if (likely(transfer_len <= htc->target_credit_size)) {
credits_required = 1;
} else {
/* figure out how many credits this message requires */
credits_required = transfer_len / htc->target_credit_size;
remainder = transfer_len % htc->target_credit_size;
if (remainder)
credits_required++;
}
ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
credits_required, ep->tx_credits);
if (ep->tx_credits < credits_required) {
__skb_queue_head(&ep->tx_queue, skb);
return NULL;
}
ep->tx_credits -= credits_required;
*credits = credits_required;
return skb;
}
static void ath10k_htc_send_work(struct work_struct *work)
{
struct ath10k_htc_ep *ep = container_of(work,
struct ath10k_htc_ep, send_work);
struct ath10k_htc *htc = ep->htc;
struct sk_buff *skb;
u8 credits = 0;
int ret;
while (true) {
if (ep->ul_is_polled)
ath10k_htc_send_complete_check(ep, 0);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credit_flow_enabled)
skb = ath10k_htc_get_skb_credit_based(htc, ep,
&credits);
else
skb = __skb_dequeue(&ep->tx_queue);
spin_unlock_bh(&htc->tx_lock);
if (!skb)
break;
ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
if (ret == -ENOSR)
break;
}
}
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
int credits = 0;
int ret;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
......@@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return -ENOENT;
}
/* FIXME: This looks ugly, can we fix it? */
spin_lock_bh(&htc->tx_lock);
if (htc->stopped) {
spin_unlock_bh(&htc->tx_lock);
return -ESHUTDOWN;
}
spin_unlock_bh(&htc->tx_lock);
__skb_queue_tail(&ep->tx_queue, skb);
skb_push(skb, sizeof(struct ath10k_htc_hdr));
spin_unlock_bh(&htc->tx_lock);
queue_work(htc->ar->workqueue, &ep->send_work);
if (ep->tx_credit_flow_enabled) {
credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
spin_unlock_bh(&htc->tx_lock);
ret = -EAGAIN;
goto err_pull;
}
ep->tx_credits -= credits;
spin_unlock_bh(&htc->tx_lock);
}
ath10k_htc_prepare_tx_skb(ep, skb);
ret = ath10k_skb_map(htc->ar->dev, skb);
if (ret)
goto err_credits;
ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
skb->len, skb);
if (ret)
goto err_unmap;
return 0;
err_unmap:
ath10k_skb_unmap(htc->ar->dev, skb);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ar);
}
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
}
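
The reworked ath10k_htc_send() above reserves DIV_ROUND_UP(skb->len, target_credit_size)
credits up front and returns them on any failure, instead of queueing the skb per
endpoint. A stand-alone sketch of that arithmetic (the 1792-byte credit size is only an
assumed example value):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int credit_size = 1792;	/* assumed example credit size */
	unsigned int lens[] = { 64, 1792, 1793, 4000 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4u -> %u credit(s)\n",
		       lens[i], DIV_ROUND_UP(lens[i], credit_size));

	return 0;
}
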
static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
......@@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
/* note: when using TX credit flow, the re-checking of queues happens
* when credits flow back from the target. in the non-TX credit case,
* we recheck after the packet completes */
spin_lock_bh(&htc->tx_lock);
if (!ep->tx_credit_flow_enabled && !htc->stopped)
queue_work(ar->workqueue, &ep->send_work);
spin_unlock_bh(&htc->tx_lock);
return 0;
}
/* flush endpoint TX queue */
static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
struct ath10k_htc_ep *ep)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
spin_lock_bh(&htc->tx_lock);
for (;;) {
skb = __skb_dequeue(&ep->tx_queue);
if (!skb)
break;
skb_cb = ATH10K_SKB_CB(skb);
skb_cb->is_aborted = true;
ath10k_htc_notify_tx_completion(ep, skb);
}
spin_unlock_bh(&htc->tx_lock);
cancel_work_sync(&ep->send_work);
}
/***********/
/* Receive */
/***********/
......@@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
queue_work(htc->ar->workqueue, &ep->send_work);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
......@@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
skb_queue_head_init(&ep->tx_queue);
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
INIT_WORK(&ep->send_work, ath10k_htc_send_work);
}
}
......@@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath10k_dbg(ATH10K_DBG_HTC,
"HTC Service %s does not allocate target credits\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
......@@ -772,16 +659,16 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
req_msg = &msg->connect_service;
req_msg->flags = __cpu_to_le16(flags);
req_msg->service_id = __cpu_to_le16(conn_req->service_id);
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg = &msg->connect_service;
req_msg->flags = __cpu_to_le16(flags);
req_msg->service_id = __cpu_to_le16(conn_req->service_id);
INIT_COMPLETION(htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
......@@ -873,19 +760,19 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
if (status)
return status;
ath10k_dbg(ATH10K_DBG_HTC,
"HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
ath10k_dbg(ATH10K_DBG_HTC,
"EP %d UL polled: %d, DL polled: %d\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath10k_dbg(ATH10K_DBG_HTC,
"HTC service: %s eid: %d TX flow control disabled\n",
ath10k_dbg(ATH10K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
......@@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
*/
void ath10k_htc_stop(struct ath10k_htc *htc)
{
int i;
struct ath10k_htc_ep *ep;
spin_lock_bh(&htc->tx_lock);
htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ath10k_htc_flush_endpoint_tx(htc, ep);
}
ath10k_hif_stop(htc->ar);
}
......
......@@ -276,6 +276,7 @@ struct ath10k_htc_ops {
struct ath10k_htc_ep_ops {
void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
void (*ep_tx_credits)(struct ath10k *);
};
/* service connection information */
......@@ -315,15 +316,11 @@ struct ath10k_htc_ep {
int ul_is_polled; /* call HIF to get tx completions */
int dl_is_polled; /* call HIF to fetch rx (not implemented) */
struct sk_buff_head tx_queue;
u8 seq_no; /* for debugging */
int tx_credits;
int tx_credit_size;
int tx_credits_per_max_message;
bool tx_credit_flow_enabled;
struct work_struct send_work;
};
struct ath10k_htc_svc_tx_credits {
......
......@@ -104,21 +104,16 @@ int ath10k_htt_attach(struct ath10k *ar)
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
ath10k_dbg(ATH10K_DBG_HTT,
"htt target version %d.%d; host version %d.%d\n",
htt->target_version_major,
htt->target_version_minor,
HTT_CURRENT_VERSION_MAJOR,
HTT_CURRENT_VERSION_MINOR);
if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
ath10k_err("htt major versions are incompatible!\n");
ath10k_info("htt target version %d.%d\n",
htt->target_version_major, htt->target_version_minor);
if (htt->target_version_major != 2 &&
htt->target_version_major != 3) {
ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
return -ENOTSUPP;
}
if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
ath10k_warn("htt minor version differ but still compatible\n");
return 0;
}
......
......@@ -19,13 +19,11 @@
#define _HTT_H_
#include <linux/bug.h>
#include <linux/interrupt.h>
#include "htc.h"
#include "rx_desc.h"
#define HTT_CURRENT_VERSION_MAJOR 2
#define HTT_CURRENT_VERSION_MINOR 1
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
HTT_DBG_STATS_RX_REORDER = 1 << 1,
......@@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */
HTT_H2T_MSG_TYPE_SYNC = 4,
HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
/* This command is used for sending management frames in HTT < 3.0.
* HTT >= 3.0 uses TX_FRM for everything. */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
HTT_H2T_NUM_MSGS /* keep this last */
......@@ -1268,6 +1269,7 @@ struct ath10k_htt {
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
struct tasklet_struct rx_replenish_task;
};
#define RX_HTT_HDR_STATUS_LEN 64
......@@ -1308,6 +1310,10 @@ struct htt_rx_desc {
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely. */
#define ATH10K_HTT_MAX_NUM_REFILL 16
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
* Rather than checking the actual cache line size, this code makes a
......@@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
......
......@@ -24,18 +24,14 @@
#define SUPPORTED_FW_MAJOR 1
#define SUPPORTED_FW_MINOR 0
#define SUPPORTED_FW_RELEASE 0
#define SUPPORTED_FW_BUILD 629
#define SUPPORTED_FW_BUILD 636
/* QCA988X 1.0 definitions */
#define QCA988X_HW_1_0_VERSION 0x4000002c
#define QCA988X_HW_1_0_FW_DIR "ath10k/QCA988X/hw1.0"
#define QCA988X_HW_1_0_FW_FILE "firmware.bin"
#define QCA988X_HW_1_0_OTP_FILE "otp.bin"
#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
......@@ -53,6 +49,9 @@ enum ath10k_hw_txrx_mode {
ATH10K_HW_TXRX_RAW = 0,
ATH10K_HW_TXRX_NATIVE_WIFI = 1,
ATH10K_HW_TXRX_ETHERNET = 2,
/* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
ATH10K_HW_TXRX_MGMT = 3,
};
enum ath10k_mcast2ucast_mode {
......@@ -75,7 +74,11 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_RX_TIMEOUT_LO_PRI 100
#define TARGET_RX_TIMEOUT_HI_PRI 40
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_ETHERNET
/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
* avoid a very expensive re-alignment in mac80211. */
#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_SCAN_MAX_PENDING_REQS 4
#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
......@@ -169,6 +172,10 @@ enum ath10k_mcast2ucast_mode {
#define SOC_LPO_CAL_ENABLE_LSB 20
#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
#define SOC_CHIP_ID_ADDRESS 0x000000ec
#define SOC_CHIP_ID_REV_LSB 8
#define SOC_CHIP_ID_REV_MASK 0x00000f00
#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
......
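The new SOC_CHIP_ID_ADDRESS / SOC_CHIP_ID_REV_* defines above let the driver read the silicon revision and refuse the now-unsupported hw1.0 parts. A minimal sketch of the decode, assuming the raw register value has already been read; the helper name and the probe-time check are illustrative, not taken from this patch:

/* Sketch only: decode the revision bits from a raw SOC_CHIP_ID_ADDRESS read. */
static inline u32 example_soc_chip_id_rev(u32 chip_id)
{
	return (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;
}

/* Hypothetical probe-time check: only the hw2.0 revision remains supported. */
if (example_soc_chip_id_rev(chip_id) != QCA988X_HW_2_0_CHIP_ID_REV)
	return -EOPNOTSUPP;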
......@@ -43,22 +43,23 @@ struct bmi_xfer {
u32 resp_len;
};
enum ath10k_pci_compl_state {
ATH10K_PCI_COMPL_FREE = 0,
ATH10K_PCI_COMPL_SEND,
ATH10K_PCI_COMPL_RECV,
};
struct ath10k_pci_compl {
struct list_head list;
int send_or_recv;
struct ce_state *ce_state;
struct hif_ce_pipe_info *pipe_info;
void *transfer_context;
enum ath10k_pci_compl_state state;
struct ath10k_ce_pipe *ce_state;
struct ath10k_pci_pipe *pipe_info;
struct sk_buff *skb;
unsigned int nbytes;
unsigned int transfer_id;
unsigned int flags;
};
/* compl_state.send_or_recv */
#define HIF_CE_COMPLETE_FREE 0
#define HIF_CE_COMPLETE_SEND 1
#define HIF_CE_COMPLETE_RECV 2
/*
* PCI-specific Target state
*
......@@ -152,17 +153,16 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
};
/* Per-pipe state. */
struct hif_ce_pipe_info {
struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
struct ce_state *ce_hdl;
struct ath10k_ce_pipe *ce_hdl;
/* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
......@@ -190,7 +190,6 @@ struct ath10k_pci {
struct device *dev;
struct ath10k *ar;
void __iomem *mem;
int cacheline_sz;
DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
......@@ -219,7 +218,7 @@ struct ath10k_pci {
bool compl_processing;
struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
struct ath10k_hif_cb msg_callbacks_current;
......@@ -227,16 +226,13 @@ struct ath10k_pci {
u32 fw_indicator_address;
/* Copy Engine used for Diagnostic Accesses */
struct ce_state *ce_diag;
struct ath10k_ce_pipe *ce_diag;
/* FIXME: document what this really protects */
spinlock_t ce_lock;
/* Map CE id to ce_state */
struct ce_state *ce_id_to_state[CE_COUNT_MAX];
/* makes sure that dummy reads are atomic */
spinlock_t hw_v1_workaround_lock;
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
......@@ -244,14 +240,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
return ar->hif.priv;
}
static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
}
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
......@@ -310,23 +310,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *addr = ar_pci->mem;
if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
unsigned long irq_flags;
spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
ioread32(addr+offset+4); /* 3rd read prior to write */
ioread32(addr+offset+4); /* 2nd read prior to write */
ioread32(addr+offset+4); /* 1st read prior to write */
iowrite32(value, addr+offset);
spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
irq_flags);
} else {
iowrite32(value, addr+offset);
}
iowrite32(value, ar_pci->mem + offset);
}
static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
......@@ -336,15 +321,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
void ath10k_do_pci_wake(struct ath10k *ar);
int ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
static inline void ath10k_pci_wake(struct ath10k *ar)
static inline int ath10k_pci_wake(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_wake(ar);
return ath10k_do_pci_wake(ar);
return 0;
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
......
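ath10k_do_pci_wake() and ath10k_pci_wake() now return an int so callers can detect a failed wake-up before touching MMIO registers. A hedged sketch of the expected calling pattern; the surrounding function is illustrative, only the wake/sleep/read32 helpers come from the header above:

/* Illustrative caller: check the wake result, do the register access,
 * then balance with ath10k_pci_sleep(). */
static int example_read_fw_indicator(struct ath10k *ar, u32 *value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	*value = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
	ath10k_pci_sleep(ar);

	return 0;
}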
......@@ -422,10 +422,30 @@ struct rx_mpdu_end {
#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14)
#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15)
/* The decapped header (rx_hdr_status) contains the following:
* a) 802.11 header
* [padding to 4 bytes]
* b) HW crypto parameter
* - 0 bytes for no security
* - 4 bytes for WEP
* - 8 bytes for TKIP, AES
* [padding to 4 bytes]
* c) A-MSDU subframe header (14 bytes) if applicable
* d) LLC/SNAP (RFC1042, 8 bytes)
*
* In case of A-MSDU only first frame in sequence contains (a) and (b). */
enum rx_msdu_decap_format {
RX_MSDU_DECAP_RAW = 0,
RX_MSDU_DECAP_NATIVE_WIFI = 1,
RX_MSDU_DECAP_RAW = 0,
/* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
* htt_rx_desc contains the original decapped 802.11 header. */
RX_MSDU_DECAP_NATIVE_WIFI = 1,
/* Payload contains an ethernet header (struct ethhdr). */
RX_MSDU_DECAP_ETHERNET2_DIX = 2,
/* Payload contains two 48-bit addresses and 2-byte length (14 bytes
* total), followed by an RFC1042 header (8 bytes). */
RX_MSDU_DECAP_8023_SNAP_LLC = 3
};
......
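The comment and enum above describe what the hardware leaves in front of the payload for each decap mode. A rough sketch of how an RX path might branch on the reported format; how fmt is pulled out of struct htt_rx_desc is assumed here, not shown by the patch:

/* Illustrative dispatch only; the comments restate the enum documentation. */
switch (fmt) {
case RX_MSDU_DECAP_RAW:
	/* frame delivered as received over the air */
	break;
case RX_MSDU_DECAP_NATIVE_WIFI:
	/* non-QoS 802.11 header; the original header is in rx_hdr_status */
	break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
	/* payload starts with a struct ethhdr */
	break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
	/* 802.3-style addresses plus length, then an RFC1042 header */
	break;
}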
......@@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
TP_PROTO(int id, void *buf, size_t buf_len),
TP_PROTO(int id, void *buf, size_t buf_len, int ret),
TP_ARGS(id, buf, buf_len),
TP_ARGS(id, buf, buf_len, ret),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
__field(int, ret)
),
TP_fast_assign(
__entry->id = id;
__entry->buf_len = buf_len;
__entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"id %d len %zu",
"id %d len %zu ret %d",
__entry->id,
__entry->buf_len
__entry->buf_len,
__entry->ret
)
);
......@@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
)
);
TRACE_EVENT(ath10k_htt_stats,
TP_PROTO(void *buf, size_t buf_len),
TP_ARGS(buf, buf_len),
TP_STRUCT__entry(
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"len %zu",
__entry->buf_len
)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
......
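As with any TRACE_EVENT() declaration, the tracepoints above are emitted from driver code through the generated trace_<name>() helpers; the WMI command tracepoint now also records the send result. The call sites below are illustrative, matching the TP_PROTO() signatures above:

/* Illustrative call sites for the tracepoints defined in this header. */
trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
trace_ath10k_htt_stats(skb->data, skb->len);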
......@@ -44,40 +44,39 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
spin_unlock_bh(&ar->data_lock);
}
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct device *dev = htt->ar->dev;
struct ieee80211_tx_info *info;
struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
int ret;
if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
return;
ATH10K_SKB_CB(txdesc)->htt.refcount--;
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn("warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
if (txfrag) {
ret = ath10k_skb_unmap(dev, txfrag);
if (ret)
ath10k_warn("txfrag unmap failed (%d)\n", ret);
dev_kfree_skb_any(txfrag);
}
msdu = htt->pending_tx[tx_done->msdu_id];
skb_cb = ATH10K_SKB_CB(msdu);
ret = ath10k_skb_unmap(dev, msdu);
if (ret)
ath10k_warn("data skb unmap failed (%d)\n", ret);
if (skb_cb->htt.frag_len)
skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
if (ATH10K_SKB_CB(txdesc)->htt.discard) {
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
goto exit;
}
......@@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status(htt->ar->hw, msdu);
......@@ -93,36 +92,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
exit:
spin_lock_bh(&htt->tx_lock);
htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
dev_kfree_skb_any(txdesc);
}
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct sk_buff *txdesc;
ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn("warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
}
txdesc = htt->pending_tx[tx_done->msdu_id];
ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
ath10k_txrx_tx_unref(htt, txdesc);
}
static const u8 rx_legacy_rate_idx[] = {
......@@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->vht_nss,
status->freq,
status->band);
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
info->skb->data, info->skb->len);
ieee80211_rx(ar->hw, info->skb);
}
......
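With the txrx changes above, the separate ath10k_txrx_tx_completed() wrapper is gone and completion state travels in a struct htt_tx_done instead of being stashed in the tx descriptor's skb cb. A minimal sketch of the new caller side; the local variable names are illustrative:

/* Illustrative caller: fill a struct htt_tx_done and hand it to
 * ath10k_txrx_tx_unref() directly. */
struct htt_tx_done tx_done = {};

tx_done.msdu_id = msdu_id;
tx_done.discard = discard;
tx_done.no_ack = no_ack;

ath10k_txrx_tx_unref(htt, &tx_done);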
......@@ -19,9 +19,8 @@
#include "htt.h"
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done);
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
......
......@@ -508,6 +508,48 @@ enum wmi_phy_mode {
MODE_MAX = 14
};
static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
{
switch (mode) {
case MODE_11A:
return "11a";
case MODE_11G:
return "11g";
case MODE_11B:
return "11b";
case MODE_11GONLY:
return "11gonly";
case MODE_11NA_HT20:
return "11na-ht20";
case MODE_11NG_HT20:
return "11ng-ht20";
case MODE_11NA_HT40:
return "11na-ht40";
case MODE_11NG_HT40:
return "11ng-ht40";
case MODE_11AC_VHT20:
return "11ac-vht20";
case MODE_11AC_VHT40:
return "11ac-vht40";
case MODE_11AC_VHT80:
return "11ac-vht80";
case MODE_11AC_VHT20_2G:
return "11ac-vht20-2g";
case MODE_11AC_VHT40_2G:
return "11ac-vht40-2g";
case MODE_11AC_VHT80_2G:
return "11ac-vht80-2g";
case MODE_UNKNOWN:
/* skip */
break;
/* no default handler to allow compiler to check that the
* enum is fully handled */
};
return "<unknown>";
}
#define WMI_CHAN_LIST_TAG 0x1
#define WMI_SSID_LIST_TAG 0x2
#define WMI_BSSID_LIST_TAG 0x3
......@@ -763,14 +805,6 @@ struct wmi_service_ready_event {
struct wlan_host_mem_req mem_reqs[1];
} __packed;
/*
* status consists of upper 16 bits of int status and lower 16 bits of
* module ID that returned status
*/
#define WLAN_INIT_STATUS_SUCCESS 0x0
#define WLAN_GET_INIT_STATUS_REASON(status) ((status) & 0xffff)
#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
......@@ -1268,7 +1302,7 @@ struct wmi_scan_event {
* good idea to pass all the fields in the RX status
* descriptor up to the host.
*/
struct wmi_mgmt_rx_hdr {
struct wmi_mgmt_rx_hdr_v1 {
__le32 channel;
__le32 snr;
__le32 rate;
......@@ -1277,8 +1311,18 @@ struct wmi_mgmt_rx_hdr {
__le32 status; /* %WMI_RX_STATUS_ */
} __packed;
struct wmi_mgmt_rx_event {
struct wmi_mgmt_rx_hdr hdr;
struct wmi_mgmt_rx_hdr_v2 {
struct wmi_mgmt_rx_hdr_v1 v1;
__le32 rssi_ctl[4];
} __packed;
struct wmi_mgmt_rx_event_v1 {
struct wmi_mgmt_rx_hdr_v1 hdr;
u8 buf[0];
} __packed;
struct wmi_mgmt_rx_event_v2 {
struct wmi_mgmt_rx_hdr_v2 hdr;
u8 buf[0];
} __packed;
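The management RX header is now split into v1 and v2 layouts, with v2 appending per-chain RSSI values. A hedged sketch of how an event handler might pick the variant; the uses_v2_hdr flag is a stand-in for however the driver keys this off the negotiated firmware interface:

/* Illustrative only: select the header layout before parsing the frame. */
if (uses_v2_hdr) {
	struct wmi_mgmt_rx_event_v2 *ev = (void *)skb->data;

	hdr = &ev->hdr.v1;
	frame = ev->buf;
} else {
	struct wmi_mgmt_rx_event_v1 *ev = (void *)skb->data;

	hdr = &ev->hdr;
	frame = ev->buf;
}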
......@@ -3000,7 +3044,6 @@ struct wmi_force_fw_hang_cmd {
#define WMI_MAX_EVENT 0x1000
/* Maximum number of pending TXed WMI packets */
#define WMI_MAX_PENDING_TX_COUNT 128
#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
/* By default disable power save for IBSS */
......@@ -3013,7 +3056,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
void ath10k_wmi_flush_tx(struct ath10k *ar);
int ath10k_wmi_connect_htc_service(struct ath10k *ar);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
......@@ -3066,7 +3108,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
enum wmi_ap_ps_peer_param param_id, u32 value);
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
......
......@@ -37,12 +37,9 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_hw *ah = common->priv;
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u16 *eeprom, *eeprom_end;
bcfg = pdev->dev.platform_data;
eeprom = (u16 *) bcfg->radio;
eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
......@@ -57,7 +54,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
ah->ah_mac_srev = bcfg->devid;
return 0;
}
......@@ -65,7 +62,7 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u8 *cfg_mac;
if (to_platform_device(ah->dev)->id == 0)
......@@ -87,7 +84,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
/*Initialization*/
static int ath_ahb_probe(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
struct resource *res;
......@@ -96,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
int ret = 0;
u32 reg;
if (!pdev->dev.platform_data) {
if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
ret = -EINVAL;
goto err_out;
......@@ -193,7 +190,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
static int ath_ahb_remove(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct ath5k_hw *ah;
u32 reg;
......
......@@ -54,7 +54,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
struct platform_device *pdev = to_platform_device(sc->dev);
struct ath9k_platform_data *pdata;
pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
pdata = dev_get_platdata(&pdev->dev);
if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
ath_err(common,
"%s: flash read failed, offset %08x is out of range\n",
......@@ -84,7 +84,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
struct ath_hw *ah;
char hw_name[64];
if (!pdev->dev.platform_data) {
if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
return -EINVAL;
}
......
......@@ -332,7 +332,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
}
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
div_ant_conf->lna1_lna2_switch_delta)
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
else
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
......@@ -554,42 +554,22 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x10: /* LNA2 A-B */
if ((antcomb->scan == 0) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
ant_conf->fast_div_bias = 0x3f;
} else {
ant_conf->fast_div_bias = 0x1;
}
ant_conf->fast_div_bias = 0x2;
break;
case 0x12: /* LNA2 LNA1 */
ant_conf->fast_div_bias = 0x39;
ant_conf->fast_div_bias = 0x3f;
break;
case 0x13: /* LNA2 A+B */
if ((antcomb->scan == 0) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
ant_conf->fast_div_bias = 0x3f;
} else {
ant_conf->fast_div_bias = 0x1;
}
ant_conf->fast_div_bias = 0x2;
break;
case 0x20: /* LNA1 A-B */
if ((antcomb->scan == 0) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
ant_conf->fast_div_bias = 0x3f;
} else {
ant_conf->fast_div_bias = 0x4;
}
ant_conf->fast_div_bias = 0x3;
break;
case 0x21: /* LNA1 LNA2 */
ant_conf->fast_div_bias = 0x6;
ant_conf->fast_div_bias = 0x3;
break;
case 0x23: /* LNA1 A+B */
if ((antcomb->scan == 0) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
ant_conf->fast_div_bias = 0x3f;
} else {
ant_conf->fast_div_bias = 0x6;
}
ant_conf->fast_div_bias = 0x3;
break;
case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
......@@ -638,7 +618,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_sub = alt_rssi_avg;
antcomb->scan = false;
if (antcomb->rssi_lna2 >
(antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
(antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
/* use LNA2 as main LNA */
if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
(antcomb->rssi_add > antcomb->rssi_sub)) {
......
......@@ -626,12 +626,11 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
if (AR_SREV_9287_11_OR_LATER(ah))
val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
}
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
if (AR_SREV_9280_20_OR_LATER(ah))
return;
/*
......
......@@ -671,7 +671,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata)
nfcal_pending = ah->caldata->nfcal_pending;
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (currCal && !nfcal &&
(currCal->calState == CAL_RUNNING ||
......@@ -861,7 +861,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
ar9002_hw_pa_cal(ah, true);
if (ah->caldata)
ah->caldata->nfcal_pending = true;
set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
......
......@@ -485,7 +485,7 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
if (IS_CHAN_HT40(ah->curchan))
nfarray[3] = sign_extend32(nf, 8);
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
if (!(ah->rxchainmask & BIT(1)))
return;
nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
......@@ -532,6 +532,7 @@ static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
AR_PHY_9285_FAST_DIV_BIAS_S;
antconf->lna1_lna2_switch_delta = -1;
antconf->lna1_lna2_delta = -3;
antconf->div_group = 0;
}
......
......@@ -52,6 +52,8 @@
#define AR9300_PAPRD_SCALE_2 0x70000000
#define AR9300_PAPRD_SCALE_2_S 28
#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
/* Delta from which to start power to pdadc table */
/* This offset is used in both open loop and closed loop power control
* schemes. In open loop power control, it is not really needed, but for
......
......@@ -364,6 +364,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar9565_1p0_modes_fast_clock);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
} else {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
......@@ -628,6 +630,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9462_common_rx_gain_table_2p0);
else if (AR_SREV_9565(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9565_1p0_Common_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9300Common_rx_gain_table_2p2);
......
......@@ -753,9 +753,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
caldata->rtt_done = false;
clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
clear_bit(RTT_DONE, &caldata->cal_flags);
}
if (!ath9k_hw_init_cal(ah, chan))
......
......@@ -334,6 +334,8 @@ void ath9k_beacon_tasklet(unsigned long data)
if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
sc->beacon.bmisscnt++;
ath9k_hw_check_nav(ah);
if (!ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
......