Commit e3037485 authored by Yan-Hsuan Chuang, committed by Kalle Valo

rtw88: new Realtek 802.11ac driver

This is a new mac80211 driver for Realtek 802.11ac wireless network chips.
rtw88 supports RTL8822BE/RTL8822CE, with basic station mode
functionality. The firmware for both can be found in linux-firmware.

https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
For RTL8822BE: rtw88/rtw8822b_fw.bin
For RTL8822CE: rtw88/rtw8822c_fw.bin

For now, only PCI buses (RTL8xxxE) are supported. We will add support
for USB and SDIO in the future. The bus interface abstraction can be seen
in this driver, e.g. hci.h. Most of the hardware settings are the same;
only some of the TRX path and probing setup needs to be kept bus-specific.

Supported:

 * Basic STA/AP/ADHOC mode, and TDLS (STA is well tested)

Missing features:

 * WOW/PNO
 * USB & SDIO bus (such as RTL8xxxU/RTL8xxxS)
 * BT coexistence (8822B/8822C are combo ICs)
 * Multiple interfaces (for now a single STA is best supported)
 * Dynamic hardware calibrations (to improve/stabilize performance)

Potential problems:

 * Static calibration takes too much time, which makes it painful for the
   driver to leave the IDLE state and slows down the association process.
   A reload function is under development and will be added soon.
 * TRX statistics can be misleading, as we are not reporting status
   correctly, i.e. not for "every" packet.

The next patch set should add BT coexistence code, since RTL8822B/C are
combo ICs and the BT driver can be found in Linux kernel v4.20 and later.
It is better to add it early to make WiFi + BT work concurrently.

Although rtw88 is simple for now, we are developing more features for it.
We even want to add support for more chips such as RTL8821C/RTL8814B.

Finally, rtw88 has many authors, listed alphabetically:

Ping-Ke Shih <pkshih@realtek.com>
Tzu-En Huang <tehuang@realtek.com>
Yan-Hsuan Chuang <yhchuang@realtek.com>
Reviewed-by: Stanislaw Gruszka <sgruszka@redhat.com>
Reviewed-by: Brian Norris <briannorris@chromium.org>
Tested-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Yan-Hsuan Chuang <yhchuang@realtek.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent c745f722
@@ -13396,6 +13396,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
S: Maintained
F: drivers/net/wireless/realtek/rtlwifi/
REALTEK WIRELESS DRIVER (rtw88)
M: Yan-Hsuan Chuang <yhchuang@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/realtek/rtw88/
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
L: linux-wireless@vger.kernel.org
@@ -14,5 +14,6 @@ if WLAN_VENDOR_REALTEK
source "drivers/net/wireless/realtek/rtl818x/Kconfig"
source "drivers/net/wireless/realtek/rtlwifi/Kconfig"
source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig"
source "drivers/net/wireless/realtek/rtw88/Kconfig"
endif # WLAN_VENDOR_REALTEK
@@ -6,4 +6,5 @@ obj-$(CONFIG_RTL8180) += rtl818x/
obj-$(CONFIG_RTL8187) += rtl818x/
obj-$(CONFIG_RTLWIFI) += rtlwifi/
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu/
obj-$(CONFIG_RTW88) += rtw88/
menuconfig RTW88
tristate "Realtek 802.11ac wireless chips support"
depends on MAC80211
help
This module adds support for mac80211-based wireless drivers that
enable Realtek IEEE 802.11ac wireless chipsets.
If you choose to build a module, it'll be called rtw88.
if RTW88
config RTW88_CORE
tristate
config RTW88_PCI
tristate
config RTW88_8822BE
bool "Realtek 8822BE PCI wireless network adapter"
depends on PCI
select RTW88_CORE
select RTW88_PCI
help
Select this option to enable support for the 8822BE chipset, an
802.11ac PCIe wireless network adapter.
config RTW88_8822CE
bool "Realtek 8822CE PCI wireless network adapter"
depends on PCI
select RTW88_CORE
select RTW88_PCI
help
Select this option to enable support for the 8822CE chipset, an
802.11ac PCIe wireless network adapter.
config RTW88_DEBUG
bool "Realtek rtw88 debug support"
depends on RTW88_CORE
help
Enable debug support.
If unsure, say Y to make it easier to debug problems.
config RTW88_DEBUGFS
bool "Realtek rtw88 debugfs support"
depends on RTW88_CORE
help
Enable debugfs support.
If unsure, say Y to make it easier to debug problems.
endif
obj-$(CONFIG_RTW88_CORE) += rtw88.o
rtw88-y += main.o \
mac80211.o \
util.o \
debug.o \
tx.o \
rx.o \
mac.o \
phy.o \
efuse.o \
fw.o \
ps.o \
sec.o \
regd.o
rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
rtw88-$(CONFIG_RTW88_8822CE) += rtw8822c.o rtw8822c_table.o
obj-$(CONFIG_RTW88_PCI) += rtwpci.o
rtwpci-objs := pci.o
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "main.h"
#include "sec.h"
#include "fw.h"
#include "debug.h"
#ifdef CONFIG_RTW88_DEBUGFS
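/* Per-file debugfs state: a back-pointer to the device, the read and/or
 * write callback backing the file, and a union holding whatever parameters
 * the last write configured (register address/length, RF path/address/mask,
 * reserved-page window, or a plain cb_data value such as a register page base).
 */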
struct rtw_debugfs_priv {
struct rtw_dev *rtwdev;
int (*cb_read)(struct seq_file *m, void *v);
ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
size_t count, loff_t *loff);
union {
u32 cb_data;
u8 *buf;
struct {
u32 page_offset;
u32 page_num;
} rsvd_page;
struct {
u8 rf_path;
u32 rf_addr;
u32 rf_mask;
};
struct {
u32 addr;
u32 len;
} read_reg;
};
};
static int rtw_debugfs_single_show(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
return debugfs_priv->cb_read(m, v);
}
static ssize_t rtw_debugfs_common_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
{
struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
return debugfs_priv->cb_write(filp, buffer, count, loff);
}
static ssize_t rtw_debugfs_single_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
return debugfs_priv->cb_write(filp, buffer, count, loff);
}
static int rtw_debugfs_single_open_rw(struct inode *inode, struct file *filp)
{
return single_open(filp, rtw_debugfs_single_show, inode->i_private);
}
static int rtw_debugfs_close(struct inode *inode, struct file *filp)
{
return 0;
}
static const struct file_operations file_ops_single_r = {
.owner = THIS_MODULE,
.open = rtw_debugfs_single_open_rw,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations file_ops_single_rw = {
.owner = THIS_MODULE,
.open = rtw_debugfs_single_open_rw,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek,
.write = rtw_debugfs_single_write,
};
static const struct file_operations file_ops_common_write = {
.owner = THIS_MODULE,
.write = rtw_debugfs_common_write,
.open = simple_open,
.release = rtw_debugfs_close,
};
static int rtw_debugfs_get_read_reg(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
u32 val, len, addr;
len = debugfs_priv->read_reg.len;
addr = debugfs_priv->read_reg.addr;
switch (len) {
case 1:
val = rtw_read8(rtwdev, addr);
seq_printf(m, "reg 0x%03x: 0x%02x\n", addr, val);
break;
case 2:
val = rtw_read16(rtwdev, addr);
seq_printf(m, "reg 0x%03x: 0x%04x\n", addr, val);
break;
case 4:
val = rtw_read32(rtwdev, addr);
seq_printf(m, "reg 0x%03x: 0x%08x\n", addr, val);
break;
}
return 0;
}
static int rtw_debugfs_get_rf_read(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
u32 val, addr, mask;
u8 path;
path = debugfs_priv->rf_path;
addr = debugfs_priv->rf_addr;
mask = debugfs_priv->rf_mask;
val = rtw_read_rf(rtwdev, path, addr, mask);
seq_printf(m, "rf_read path:%d addr:0x%08x mask:0x%08x val=0x%08x\n",
path, addr, mask, val);
return 0;
}
static int rtw_debugfs_copy_from_user(char tmp[], int size,
const char __user *buffer, size_t count,
int num)
{
int tmp_len;
if (count < num)
return -EFAULT;
tmp_len = (count > size - 1 ? size - 1 : count);
if (!buffer || copy_from_user(tmp, buffer, tmp_len))
return count;
tmp[tmp_len] = '\0';
return 0;
}
static ssize_t rtw_debugfs_set_read_reg(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
char tmp[32 + 1];
u32 addr, len;
int num;
rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
num = sscanf(tmp, "%x %x", &addr, &len);
if (num != 2)
return count;
if (len != 1 && len != 2 && len != 4) {
rtw_warn(rtwdev, "read reg setting wrong len\n");
return -EINVAL;
}
debugfs_priv->read_reg.addr = addr;
debugfs_priv->read_reg.len = len;
return count;
}
static int rtw_debugfs_get_dump_cam(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
u32 val, command;
u32 hw_key_idx = debugfs_priv->cb_data << RTW_SEC_CAM_ENTRY_SHIFT;
u32 read_cmd = RTW_SEC_CMD_POLLING;
int i;
seq_printf(m, "cam entry%d\n", debugfs_priv->cb_data);
seq_puts(m, "0x0 0x1 0x2 0x3 ");
seq_puts(m, "0x4 0x5\n");
mutex_lock(&rtwdev->mutex);
for (i = 0; i <= 5; i++) {
command = read_cmd | (hw_key_idx + i);
rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
val = rtw_read32(rtwdev, RTW_SEC_READ_REG);
seq_printf(m, "%8.8x", val);
if (i < 2)
seq_puts(m, " ");
}
seq_puts(m, "\n");
mutex_unlock(&rtwdev->mutex);
return 0;
}
static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
u8 page_size = rtwdev->chip->page_size;
u32 buf_size = debugfs_priv->rsvd_page.page_num * page_size;
u32 offset = debugfs_priv->rsvd_page.page_offset * page_size;
u8 *buf;
int i;
int ret;
buf = vzalloc(buf_size);
if (!buf)
return -ENOMEM;
ret = rtw_dump_drv_rsvd_page(rtwdev, offset, buf_size, (u32 *)buf);
if (ret) {
rtw_err(rtwdev, "failed to dump rsvd page\n");
vfree(buf);
return ret;
}
for (i = 0 ; i < buf_size ; i += 8) {
if (i % page_size == 0)
seq_printf(m, "PAGE %d\n", (i + offset) / page_size);
seq_printf(m, "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
*(buf + i), *(buf + i + 1),
*(buf + i + 2), *(buf + i + 3),
*(buf + i + 4), *(buf + i + 5),
*(buf + i + 6), *(buf + i + 7));
}
vfree(buf);
return 0;
}
static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
char tmp[32 + 1];
u32 offset, page_num;
int num;
rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
num = sscanf(tmp, "%d %d", &offset, &page_num);
if (num != 2) {
rtw_warn(rtwdev, "invalid arguments\n");
return num;
}
debugfs_priv->rsvd_page.page_offset = offset;
debugfs_priv->rsvd_page.page_num = page_num;
return count;
}
static ssize_t rtw_debugfs_set_single_input(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
char tmp[32 + 1];
u32 input;
int num;
rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
num = kstrtoint(tmp, 0, &input);
if (num) {
rtw_warn(rtwdev, "kstrtoint failed\n");
return num;
}
debugfs_priv->cb_data = input;
return count;
}
static ssize_t rtw_debugfs_set_write_reg(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
{
struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
char tmp[32 + 1];
u32 addr, val, len;
int num;
rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
/* write BB/MAC register */
num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
if (num != 3)
return count;
switch (len) {
case 1:
rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
"reg write8 0x%03x: 0x%08x\n", addr, val);
rtw_write8(rtwdev, addr, (u8)val);
break;
case 2:
rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
"reg write16 0x%03x: 0x%08x\n", addr, val);
rtw_write16(rtwdev, addr, (u16)val);
break;
case 4:
rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
"reg write32 0x%03x: 0x%08x\n", addr, val);
rtw_write32(rtwdev, addr, (u32)val);
break;
default:
rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
"error write length = %d\n", len);
break;
}
return count;
}
static ssize_t rtw_debugfs_set_rf_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
{
struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
char tmp[32 + 1];
u32 path, addr, mask, val;
int num;
rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 4);
num = sscanf(tmp, "%x %x %x %x", &path, &addr, &mask, &val);
if (num != 4) {
rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n");
return count;
}
rtw_write_rf(rtwdev, path, addr, mask, val);
rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
"write_rf path:%d addr:0x%08x mask:0x%08x, val:0x%08x\n",
path, addr, mask, val);
return count;
}
static ssize_t rtw_debugfs_set_rf_read(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
char tmp[32 + 1];
u32 path, addr, mask;
int num;
rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
num = sscanf(tmp, "%x %x %x", &path, &addr, &mask);
if (num != 3) {
rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n");
return count;
}
debugfs_priv->rf_path = path;
debugfs_priv->rf_addr = addr;
debugfs_priv->rf_mask = mask;
return count;
}
static int rtw_debug_get_mac_page(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
u32 val;
u32 page = debugfs_priv->cb_data;
int i, n;
int max = 0xff;
val = rtw_read32(rtwdev, debugfs_priv->cb_data);
for (n = 0; n <= max; ) {
seq_printf(m, "\n%8.8x ", n + page);
for (i = 0; i < 4 && n <= max; i++, n += 4)
seq_printf(m, "%8.8x ",
rtw_read32(rtwdev, (page | n)));
}
seq_puts(m, "\n");
return 0;
}
static int rtw_debug_get_bb_page(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
u32 val;
u32 page = debugfs_priv->cb_data;
int i, n;
int max = 0xff;
val = rtw_read32(rtwdev, debugfs_priv->cb_data);
for (n = 0; n <= max; ) {
seq_printf(m, "\n%8.8x ", n + page);
for (i = 0; i < 4 && n <= max; i++, n += 4)
seq_printf(m, "%8.8x ",
rtw_read32(rtwdev, (page | n)));
}
seq_puts(m, "\n");
return 0;
}
static int rtw_debug_get_rf_dump(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
u32 addr, offset, data;
u8 path;
for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
seq_printf(m, "RF path:%d\n", path);
for (addr = 0; addr < 0x100; addr += 4) {
seq_printf(m, "%8.8x ", addr);
for (offset = 0; offset < 4; offset++) {
data = rtw_read_rf(rtwdev, path, addr + offset,
0xffffffff);
seq_printf(m, "%8.8x ", data);
}
seq_puts(m, "\n");
}
seq_puts(m, "\n");
}
return 0;
}
#define rtw_debug_impl_mac(page, addr) \
static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
.cb_read = rtw_debug_get_mac_page, \
.cb_data = addr, \
}
rtw_debug_impl_mac(0, 0x0000);
rtw_debug_impl_mac(1, 0x0100);
rtw_debug_impl_mac(2, 0x0200);
rtw_debug_impl_mac(3, 0x0300);
rtw_debug_impl_mac(4, 0x0400);
rtw_debug_impl_mac(5, 0x0500);
rtw_debug_impl_mac(6, 0x0600);
rtw_debug_impl_mac(7, 0x0700);
rtw_debug_impl_mac(10, 0x1000);
rtw_debug_impl_mac(11, 0x1100);
rtw_debug_impl_mac(12, 0x1200);
rtw_debug_impl_mac(13, 0x1300);
rtw_debug_impl_mac(14, 0x1400);
rtw_debug_impl_mac(15, 0x1500);
rtw_debug_impl_mac(16, 0x1600);
rtw_debug_impl_mac(17, 0x1700);
#define rtw_debug_impl_bb(page, addr) \
static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = { \
.cb_read = rtw_debug_get_bb_page, \
.cb_data = addr, \
}
rtw_debug_impl_bb(8, 0x0800);
rtw_debug_impl_bb(9, 0x0900);
rtw_debug_impl_bb(a, 0x0a00);
rtw_debug_impl_bb(b, 0x0b00);
rtw_debug_impl_bb(c, 0x0c00);
rtw_debug_impl_bb(d, 0x0d00);
rtw_debug_impl_bb(e, 0x0e00);
rtw_debug_impl_bb(f, 0x0f00);
rtw_debug_impl_bb(18, 0x1800);
rtw_debug_impl_bb(19, 0x1900);
rtw_debug_impl_bb(1a, 0x1a00);
rtw_debug_impl_bb(1b, 0x1b00);
rtw_debug_impl_bb(1c, 0x1c00);
rtw_debug_impl_bb(1d, 0x1d00);
rtw_debug_impl_bb(1e, 0x1e00);
rtw_debug_impl_bb(1f, 0x1f00);
rtw_debug_impl_bb(2c, 0x2c00);
rtw_debug_impl_bb(2d, 0x2d00);
rtw_debug_impl_bb(40, 0x4000);
rtw_debug_impl_bb(41, 0x4100);
static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = {
.cb_read = rtw_debug_get_rf_dump,
};
static struct rtw_debugfs_priv rtw_debug_priv_write_reg = {
.cb_write = rtw_debugfs_set_write_reg,
};
static struct rtw_debugfs_priv rtw_debug_priv_rf_write = {
.cb_write = rtw_debugfs_set_rf_write,
};
static struct rtw_debugfs_priv rtw_debug_priv_rf_read = {
.cb_write = rtw_debugfs_set_rf_read,
.cb_read = rtw_debugfs_get_rf_read,
};
static struct rtw_debugfs_priv rtw_debug_priv_read_reg = {
.cb_write = rtw_debugfs_set_read_reg,
.cb_read = rtw_debugfs_get_read_reg,
};
static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = {
.cb_write = rtw_debugfs_set_single_input,
.cb_read = rtw_debugfs_get_dump_cam,
};
static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
.cb_write = rtw_debugfs_set_rsvd_page,
.cb_read = rtw_debugfs_get_rsvd_page,
};
#define rtw_debugfs_add_core(name, mode, fopname, parent) \
do { \
rtw_debug_priv_ ##name.rtwdev = rtwdev; \
if (!debugfs_create_file(#name, mode, \
parent, &rtw_debug_priv_ ##name,\
&file_ops_ ##fopname)) \
pr_debug("Unable to initialize debugfs:%s\n", \
#name); \
} while (0)
#define rtw_debugfs_add_w(name) \
rtw_debugfs_add_core(name, S_IFREG | 0222, common_write, debugfs_topdir)
#define rtw_debugfs_add_rw(name) \
rtw_debugfs_add_core(name, S_IFREG | 0666, single_rw, debugfs_topdir)
#define rtw_debugfs_add_r(name) \
rtw_debugfs_add_core(name, S_IFREG | 0444, single_r, debugfs_topdir)
void rtw_debugfs_init(struct rtw_dev *rtwdev)
{
struct dentry *debugfs_topdir = rtwdev->debugfs;
debugfs_topdir = debugfs_create_dir("rtw88",
rtwdev->hw->wiphy->debugfsdir);
rtw_debugfs_add_w(write_reg);
rtw_debugfs_add_rw(read_reg);
rtw_debugfs_add_w(rf_write);
rtw_debugfs_add_rw(rf_read);
rtw_debugfs_add_rw(dump_cam);
rtw_debugfs_add_rw(rsvd_page);
rtw_debugfs_add_r(mac_0);
rtw_debugfs_add_r(mac_1);
rtw_debugfs_add_r(mac_2);
rtw_debugfs_add_r(mac_3);
rtw_debugfs_add_r(mac_4);
rtw_debugfs_add_r(mac_5);
rtw_debugfs_add_r(mac_6);
rtw_debugfs_add_r(mac_7);
rtw_debugfs_add_r(bb_8);
rtw_debugfs_add_r(bb_9);
rtw_debugfs_add_r(bb_a);
rtw_debugfs_add_r(bb_b);
rtw_debugfs_add_r(bb_c);
rtw_debugfs_add_r(bb_d);
rtw_debugfs_add_r(bb_e);
rtw_debugfs_add_r(bb_f);
rtw_debugfs_add_r(mac_10);
rtw_debugfs_add_r(mac_11);
rtw_debugfs_add_r(mac_12);
rtw_debugfs_add_r(mac_13);
rtw_debugfs_add_r(mac_14);
rtw_debugfs_add_r(mac_15);
rtw_debugfs_add_r(mac_16);
rtw_debugfs_add_r(mac_17);
rtw_debugfs_add_r(bb_18);
rtw_debugfs_add_r(bb_19);
rtw_debugfs_add_r(bb_1a);
rtw_debugfs_add_r(bb_1b);
rtw_debugfs_add_r(bb_1c);
rtw_debugfs_add_r(bb_1d);
rtw_debugfs_add_r(bb_1e);
rtw_debugfs_add_r(bb_1f);
if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
rtw_debugfs_add_r(bb_2c);
rtw_debugfs_add_r(bb_2d);
rtw_debugfs_add_r(bb_40);
rtw_debugfs_add_r(bb_41);
}
rtw_debugfs_add_r(rf_dump);
}
#endif /* CONFIG_RTW88_DEBUGFS */
#ifdef CONFIG_RTW88_DEBUG
void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
va_start(args, fmt);
vaf.va = &args;
if (rtw_debug_mask & mask)
dev_printk(KERN_DEBUG, rtwdev->dev, "%pV", &vaf);
va_end(args);
}
EXPORT_SYMBOL(__rtw_dbg);
#endif /* CONFIG_RTW88_DEBUG */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_DEBUG_H
#define __RTW_DEBUG_H
enum rtw_debug_mask {
RTW_DBG_PCI = 0x00000001,
RTW_DBG_TX = 0x00000002,
RTW_DBG_RX = 0x00000004,
RTW_DBG_PHY = 0x00000008,
RTW_DBG_FW = 0x00000010,
RTW_DBG_EFUSE = 0x00000020,
RTW_DBG_COEX = 0x00000040,
RTW_DBG_RFK = 0x00000080,
RTW_DBG_REGD = 0x00000100,
RTW_DBG_DEBUGFS = 0x00000200,
RTW_DBG_ALL = 0xffffffff
};
#ifdef CONFIG_RTW88_DEBUGFS
void rtw_debugfs_init(struct rtw_dev *rtwdev);
#else
static inline void rtw_debugfs_init(struct rtw_dev *rtwdev) {}
#endif /* CONFIG_RTW88_DEBUGFS */
#ifdef CONFIG_RTW88_DEBUG
__printf(3, 4)
void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
const char *fmt, ...);
#define rtw_dbg(rtwdev, a...) __rtw_dbg(rtwdev, ##a)
#else
static inline void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
const char *fmt, ...) {}
#endif /* CONFIG_RTW88_DEBUG */
#define rtw_info(rtwdev, a...) dev_info(rtwdev->dev, ##a)
#define rtw_warn(rtwdev, a...) dev_warn(rtwdev->dev, ##a)
#define rtw_err(rtwdev, a...) dev_err(rtwdev->dev, ##a)
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "efuse.h"
#include "reg.h"
#include "debug.h"
#define RTW_EFUSE_BANK_WIFI 0x0
static void switch_efuse_bank(struct rtw_dev *rtwdev)
{
rtw_write32_mask(rtwdev, REG_LDO_EFUSE_CTRL, BIT_MASK_EFUSE_BANK_SEL,
RTW_EFUSE_BANK_WIFI);
}
#define invalid_efuse_header(hdr1, hdr2) \
((hdr1) == 0xff || (((hdr1) & 0x1f) == 0xf && (hdr2) == 0xff))
#define invalid_efuse_content(word_en, i) \
(((word_en) & BIT(i)) != 0x0)
#define get_efuse_blk_idx_2_byte(hdr1, hdr2) \
((((hdr2) & 0xf0) >> 1) | (((hdr1) >> 5) & 0x07))
#define get_efuse_blk_idx_1_byte(hdr1) \
(((hdr1) & 0xf0) >> 4)
#define block_idx_to_logical_idx(blk_idx, i) \
(((blk_idx) << 3) + ((i) << 1))
/* efuse header format
*
* | 7 5 4 0 | 7 4 3 0 | 15 8 7 0 |
* block[2:0] 0 1111 block[6:3] word_en[3:0] byte0 byte1
* | header 1 (optional) | header 2 | word N |
*
* word_en: 4 bits each word. 0 -> write; 1 -> not write
* N: 1~4, depends on word_en
*/
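/* Worked example (hypothetical header bytes): hdr1 = 0x2f, hdr2 = 0x5c.
 * (hdr1 & 0x1f) == 0xf, so this is a 2-byte header.
 * blk_idx = ((0x5c & 0xf0) >> 1) | ((0x2f >> 5) & 0x07) = 0x28 | 0x1 = 0x29
 * word_en = 0x5c & 0xf = 0xc (binary 1100), so only words 0 and 1 follow,
 * and their bytes land at logical offsets (0x29 << 3) + 0..3 = 0x148..0x14b.
 */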
static int rtw_dump_logical_efuse_map(struct rtw_dev *rtwdev, u8 *phy_map,
u8 *log_map)
{
u32 physical_size = rtwdev->efuse.physical_size;
u32 protect_size = rtwdev->efuse.protect_size;
u32 logical_size = rtwdev->efuse.logical_size;
u32 phy_idx, log_idx;
u8 hdr1, hdr2;
u8 blk_idx;
u8 word_en;
int i;
for (phy_idx = 0; phy_idx < physical_size - protect_size;) {
hdr1 = phy_map[phy_idx];
hdr2 = phy_map[phy_idx + 1];
if (invalid_efuse_header(hdr1, hdr2))
break;
if ((hdr1 & 0x1f) == 0xf) {
/* 2-byte header format */
blk_idx = get_efuse_blk_idx_2_byte(hdr1, hdr2);
word_en = hdr2 & 0xf;
phy_idx += 2;
} else {
/* 1-byte header format */
blk_idx = get_efuse_blk_idx_1_byte(hdr1);
word_en = hdr1 & 0xf;
phy_idx += 1;
}
for (i = 0; i < 4; i++) {
if (invalid_efuse_content(word_en, i))
continue;
log_idx = block_idx_to_logical_idx(blk_idx, i);
if (phy_idx + 1 > physical_size - protect_size ||
log_idx + 1 > logical_size)
return -EINVAL;
log_map[log_idx] = phy_map[phy_idx];
log_map[log_idx + 1] = phy_map[phy_idx + 1];
phy_idx += 2;
}
}
return 0;
}
static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
{
struct rtw_chip_info *chip = rtwdev->chip;
u32 size = rtwdev->efuse.physical_size;
u32 efuse_ctl;
u32 addr;
u32 cnt;
switch_efuse_bank(rtwdev);
/* disable 2.5V LDO */
chip->ops->cfg_ldo25(rtwdev, false);
efuse_ctl = rtw_read32(rtwdev, REG_EFUSE_CTRL);
for (addr = 0; addr < size; addr++) {
efuse_ctl &= ~(BIT_MASK_EF_DATA | BITS_EF_ADDR);
efuse_ctl |= (addr & BIT_MASK_EF_ADDR) << BIT_SHIFT_EF_ADDR;
rtw_write32(rtwdev, REG_EFUSE_CTRL, efuse_ctl & (~BIT_EF_FLAG));
cnt = 1000000;
do {
udelay(1);
efuse_ctl = rtw_read32(rtwdev, REG_EFUSE_CTRL);
if (--cnt == 0)
return -EBUSY;
} while (!(efuse_ctl & BIT_EF_FLAG));
*(map + addr) = (u8)(efuse_ctl & BIT_MASK_EF_DATA);
}
return 0;
}
int rtw_parse_efuse_map(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u32 phy_size = efuse->physical_size;
u32 log_size = efuse->logical_size;
u8 *phy_map = NULL;
u8 *log_map = NULL;
int ret = 0;
phy_map = kmalloc(phy_size, GFP_KERNEL);
log_map = kmalloc(log_size, GFP_KERNEL);
if (!phy_map || !log_map) {
ret = -ENOMEM;
goto out_free;
}
ret = rtw_dump_physical_efuse_map(rtwdev, phy_map);
if (ret) {
rtw_err(rtwdev, "failed to dump efuse physical map\n");
goto out_free;
}
memset(log_map, 0xff, log_size);
ret = rtw_dump_logical_efuse_map(rtwdev, phy_map, log_map);
if (ret) {
rtw_err(rtwdev, "failed to dump efuse logical map\n");
goto out_free;
}
ret = chip->ops->read_efuse(rtwdev, log_map);
if (ret) {
rtw_err(rtwdev, "failed to read efuse map\n");
goto out_free;
}
out_free:
kfree(log_map);
kfree(phy_map);
return ret;
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_EFUSE_H__
#define __RTW_EFUSE_H__
#define EFUSE_HW_CAP_IGNORE 0
#define EFUSE_HW_CAP_PTCL_VHT 3
#define EFUSE_HW_CAP_SUPP_BW80 7
#define EFUSE_HW_CAP_SUPP_BW40 6
#define GET_EFUSE_HW_CAP_HCI(hw_cap) \
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(3, 0))
#define GET_EFUSE_HW_CAP_BW(hw_cap) \
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(18, 16))
#define GET_EFUSE_HW_CAP_NSS(hw_cap) \
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(20, 19))
#define GET_EFUSE_HW_CAP_ANT_NUM(hw_cap) \
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(23, 21))
#define GET_EFUSE_HW_CAP_PTCL(hw_cap) \
le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(27, 26))
int rtw_parse_efuse_map(struct rtw_dev *rtwdev);
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
#include "debug.h"
void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
u8 sub_cmd_id;
c2h = get_c2h_from_skb(skb);
sub_cmd_id = c2h->payload[0];
switch (sub_cmd_id) {
case C2H_CCX_RPT:
rtw_tx_report_handle(rtwdev, skb);
break;
default:
break;
}
}
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
u32 pkt_offset;
u8 len;
pkt_offset = *((u32 *)skb->cb);
c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
len = skb->len - pkt_offset - 2;
rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
c2h->id, c2h->seq, len);
switch (c2h->id) {
case C2H_HALMAC:
rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
break;
default:
break;
}
}
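/* H2C commands are 8 bytes and are handed to the firmware through one of four
 * mailbox register pairs (REG_HMEBOXn / REG_HMEBOXn_EX), used round-robin.
 * Before writing a box, the driver polls REG_HMETFR until the firmware reports
 * that box as empty.
 */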
void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, u8 *h2c)
{
u8 box;
u8 box_state;
u32 box_reg, box_ex_reg;
u32 h2c_wait;
int idx;
rtw_dbg(rtwdev, RTW_DBG_FW,
"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
h2c[3], h2c[2], h2c[1], h2c[0],
h2c[7], h2c[6], h2c[5], h2c[4]);
spin_lock(&rtwdev->h2c.lock);
box = rtwdev->h2c.last_box_num;
switch (box) {
case 0:
box_reg = REG_HMEBOX0;
box_ex_reg = REG_HMEBOX0_EX;
break;
case 1:
box_reg = REG_HMEBOX1;
box_ex_reg = REG_HMEBOX1_EX;
break;
case 2:
box_reg = REG_HMEBOX2;
box_ex_reg = REG_HMEBOX2_EX;
break;
case 3:
box_reg = REG_HMEBOX3;
box_ex_reg = REG_HMEBOX3_EX;
break;
default:
WARN(1, "invalid h2c mail box number\n");
goto out;
}
h2c_wait = 20;
do {
box_state = rtw_read8(rtwdev, REG_HMETFR);
} while ((box_state >> box) & 0x1 && --h2c_wait > 0);
if (!h2c_wait) {
rtw_err(rtwdev, "failed to send h2c command\n");
goto out;
}
for (idx = 0; idx < 4; idx++)
rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
for (idx = 0; idx < 4; idx++)
rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);
if (++rtwdev->h2c.last_box_num >= 4)
rtwdev->h2c.last_box_num = 0;
out:
spin_unlock(&rtwdev->h2c.lock);
}
static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
{
int ret;
spin_lock(&rtwdev->h2c.lock);
FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
if (ret)
rtw_err(rtwdev, "failed to send h2c packet\n");
rtwdev->h2c.seq++;
spin_unlock(&rtwdev->h2c.lock);
}
void
rtw_fw_send_general_info(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + 4;
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
fifo->rsvd_fw_txbuf_addr -
fifo->rsvd_boundary);
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
void
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + 8;
u8 fw_rf_type = 0;
if (hal->rf_type == RF_1T1R)
fw_rf_type = FW_RF_1T1R;
else if (hal->rf_type == RF_2T2R)
fw_rf_type = FW_RF_2T2R;
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);
SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + 1;
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
IQK_SET_CLEAR(h2c_pkt, para->clear);
IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u8 rssi = ewma_rssi_read(&si->avg_rssi);
bool stbc_en = si->stbc_en ? true : false;
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);
SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
bool no_update = si->updated;
bool disable_pt = true;
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en);
SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);
si->init_ra_lv = 0;
si->updated = true;
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);
SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
enum rtw_rsvd_packet_type type)
{
struct rtw_rsvd_page *rsvd_pkt;
u8 location = 0;
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
if (type == rsvd_pkt->type)
location = rsvd_pkt->page;
}
return location;
}
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u8 location = 0;
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);
location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
*(h2c_pkt + 1) = location;
rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);
location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
*(h2c_pkt + 2) = location;
rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);
location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
*(h2c_pkt + 3) = location;
rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);
location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
*(h2c_pkt + 4) = location;
rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
static struct sk_buff *
rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct sk_buff *skb_new;
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC &&
!ieee80211_vif_is_mesh(vif)) {
skb_new = alloc_skb(1, GFP_KERNEL);
if (!skb_new)
return NULL;
skb_put(skb_new, 1);
} else {
skb_new = ieee80211_beacon_get(hw, vif);
}
return skb_new;
}
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum rtw_rsvd_packet_type type)
{
struct sk_buff *skb_new;
switch (type) {
case RSVD_BEACON:
skb_new = rtw_beacon_get(hw, vif);
break;
case RSVD_PS_POLL:
skb_new = ieee80211_pspoll_get(hw, vif);
break;
case RSVD_PROBE_RESP:
skb_new = ieee80211_proberesp_get(hw, vif);
break;
case RSVD_NULL:
skb_new = ieee80211_nullfunc_get(hw, vif, false);
break;
case RSVD_QOS_NULL:
skb_new = ieee80211_nullfunc_get(hw, vif, true);
break;
default:
return NULL;
}
if (!skb_new)
return NULL;
return skb_new;
}
static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_tx_pkt_info pkt_info;
struct rtw_chip_info *chip = rtwdev->chip;
u8 *pkt_desc;
memset(&pkt_info, 0, sizeof(pkt_info));
rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb);
pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
rtw_tx_fill_tx_desc(&pkt_info, skb);
}
static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
{
return DIV_ROUND_UP(len, page_size);
}
static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
u8 page_margin, u32 page, u8 *buf,
struct rtw_rsvd_page *rsvd_pkt)
{
struct sk_buff *skb = rsvd_pkt->skb;
if (rsvd_pkt->add_txdesc)
rtw_fill_rsvd_page_desc(rtwdev, skb);
if (page >= 1)
memcpy(buf + page_margin + page_size * (page - 1),
skb->data, skb->len);
else
memcpy(buf, skb->data, skb->len);
}
void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
bool txdesc)
{
struct rtw_rsvd_page *rsvd_pkt;
lockdep_assert_held(&rtwdev->mutex);
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
if (rsvd_pkt->type == type)
return;
}
rsvd_pkt = kmalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
if (!rsvd_pkt)
return;
rsvd_pkt->type = type;
rsvd_pkt->add_txdesc = txdesc;
list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
}
void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
{
struct rtw_rsvd_page *rsvd_pkt, *tmp;
lockdep_assert_held(&rtwdev->mutex);
list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
if (rsvd_pkt->type == RSVD_BEACON)
continue;
list_del(&rsvd_pkt->list);
kfree(rsvd_pkt);
}
}
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size)
{
u8 bckp[2];
u8 val;
u16 rsvd_pg_head;
int ret;
lockdep_assert_held(&rtwdev->mutex);
if (!size)
return -EINVAL;
pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr | BIT_BCN_VALID_V1);
val = rtw_read8(rtwdev, REG_CR + 1);
bckp[0] = val;
val |= BIT_ENSWBCN >> 8;
rtw_write8(rtwdev, REG_CR + 1, val);
val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
bckp[1] = val;
val &= ~(BIT_EN_BCNQ_DL >> 16);
rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
if (ret) {
rtw_err(rtwdev, "failed to write data to rsvd page\n");
goto restore;
}
if (!check_hw_ready(rtwdev, REG_FIFOPAGE_CTRL_2, BIT_BCN_VALID_V1, 1)) {
rtw_err(rtwdev, "error beacon valid\n");
ret = -EBUSY;
}
restore:
rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
rsvd_pg_head | BIT_BCN_VALID_V1);
rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
return ret;
}
static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
u32 pg_size;
u32 pg_num = 0;
u16 pg_addr = 0;
pg_size = rtwdev->chip->page_size;
pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
return -ENOMEM;
pg_addr = rtwdev->fifo.rsvd_drv_addr;
return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
struct ieee80211_vif *vif, u32 *size)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *iter;
struct rtw_rsvd_page *rsvd_pkt;
u32 page = 0;
u8 total_page = 0;
u8 page_size, page_margin, tx_desc_sz;
u8 *buf;
page_size = chip->page_size;
tx_desc_sz = chip->tx_pkt_desc_sz;
page_margin = page_size - tx_desc_sz;
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
if (!iter) {
rtw_err(rtwdev, "fail to build rsvd packet\n");
goto release_skb;
}
rsvd_pkt->skb = iter;
rsvd_pkt->page = total_page;
if (rsvd_pkt->add_txdesc)
total_page += rtw_len_to_page(iter->len + tx_desc_sz,
page_size);
else
total_page += rtw_len_to_page(iter->len, page_size);
}
if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
goto release_skb;
}
*size = (total_page - 1) * page_size + page_margin;
buf = kzalloc(*size, GFP_KERNEL);
if (!buf)
goto release_skb;
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
page, buf, rsvd_pkt);
page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
}
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
kfree_skb(rsvd_pkt->skb);
return buf;
release_skb:
list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
kfree_skb(rsvd_pkt->skb);
return NULL;
}
static int
rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct sk_buff *skb;
int ret = 0;
skb = rtw_beacon_get(hw, vif);
if (!skb) {
rtw_err(rtwdev, "failed to get beacon skb\n");
ret = -ENOMEM;
goto out;
}
ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
if (ret)
rtw_err(rtwdev, "failed to download drv rsvd page\n");
dev_kfree_skb(skb);
out:
return ret;
}
int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
{
u8 *buf;
u32 size;
int ret;
buf = rtw_build_rsvd_page(rtwdev, vif, &size);
if (!buf) {
rtw_err(rtwdev, "failed to build rsvd page pkt\n");
return -ENOMEM;
}
ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
if (ret) {
rtw_err(rtwdev, "failed to download drv rsvd page\n");
goto free;
}
ret = rtw_download_beacon(rtwdev, vif);
if (ret) {
rtw_err(rtwdev, "failed to download beacon\n");
goto free;
}
free:
kfree(buf);
return ret;
}
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
u32 residue, i;
u16 start_pg;
u16 idx = 0;
u16 ctl;
u8 rcr;
if (size & 0x3) {
rtw_warn(rtwdev, "should be 4-byte aligned\n");
return -EINVAL;
}
offset += fifo->rsvd_boundary << TX_PAGE_SIZE_SHIFT;
residue = offset & (FIFO_PAGE_SIZE - 1);
start_pg = offset >> FIFO_PAGE_SIZE_SHIFT;
start_pg += RSVD_PAGE_START_ADDR;
rcr = rtw_read8(rtwdev, REG_RCR + 2);
ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
/* disable rx clock gate */
rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));
do {
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);
for (i = FIFO_DUMP_ADDR + residue;
i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
buf[idx++] = rtw_read32(rtwdev, i);
size -= 4;
if (size == 0)
goto out;
}
residue = 0;
start_pg++;
} while (size);
out:
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
rtw_write8(rtwdev, REG_RCR + 2, rcr);
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_FW_H_
#define __RTW_FW_H_
#define H2C_PKT_SIZE 32
#define H2C_PKT_HDR_SIZE 8
/* FW bin information */
#define FW_HDR_SIZE 64
#define FW_HDR_CHKSUM_SIZE 8
#define FW_HDR_VERSION 4
#define FW_HDR_SUBVERSION 6
#define FW_HDR_SUBINDEX 7
#define FW_HDR_MONTH 16
#define FW_HDR_DATE 17
#define FW_HDR_HOUR 18
#define FW_HDR_MIN 19
#define FW_HDR_YEAR 20
#define FW_HDR_MEM_USAGE 24
#define FW_HDR_H2C_FMT_VER 28
#define FW_HDR_DMEM_ADDR 32
#define FW_HDR_DMEM_SIZE 36
#define FW_HDR_IMEM_SIZE 48
#define FW_HDR_EMEM_SIZE 52
#define FW_HDR_EMEM_ADDR 56
#define FW_HDR_IMEM_ADDR 60
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
#define RSVD_PAGE_START_ADDR 0x780
#define FIFO_DUMP_ADDR 0x8000
enum rtw_c2h_cmd_id {
C2H_BT_INFO = 0x09,
C2H_HW_FEATURE_REPORT = 0x19,
C2H_HW_FEATURE_DUMP = 0xfd,
C2H_HALMAC = 0xff,
};
enum rtw_c2h_cmd_id_ext {
C2H_CCX_RPT = 0x0f,
};
struct rtw_c2h_cmd {
u8 id;
u8 seq;
u8 payload[0];
} __packed;
enum rtw_rsvd_packet_type {
RSVD_BEACON,
RSVD_PS_POLL,
RSVD_PROBE_RESP,
RSVD_NULL,
RSVD_QOS_NULL,
};
enum rtw_fw_rf_type {
FW_RF_1T2R = 0,
FW_RF_2T4R = 1,
FW_RF_2T2R = 2,
FW_RF_2T3R = 3,
FW_RF_1T1R = 4,
FW_RF_2T2R_GREEN = 5,
FW_RF_3T3R = 6,
FW_RF_3T4R = 7,
FW_RF_4T4R = 8,
FW_RF_MAX_TYPE = 0xF,
};
struct rtw_iqk_para {
u8 clear;
u8 segment_iqk;
};
struct rtw_rsvd_page {
struct list_head list;
struct sk_buff *skb;
enum rtw_rsvd_packet_type type;
u8 page;
bool add_txdesc;
};
/* C2H */
#define GET_CCX_REPORT_SEQNUM(c2h_payload) (c2h_payload[8] & 0xfc)
#define GET_CCX_REPORT_STATUS(c2h_payload) (c2h_payload[9] & 0xc0)
/* PKT H2C */
#define H2C_PKT_CMD_ID 0xFF
#define H2C_PKT_CATEGORY 0x01
#define H2C_PKT_GENERAL_INFO 0x0D
#define H2C_PKT_PHYDM_INFO 0x11
#define H2C_PKT_IQK 0x0E
#define SET_PKT_H2C_CATEGORY(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(6, 0))
#define SET_PKT_H2C_CMD_ID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_PKT_H2C_SUB_CMD_ID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 16))
#define SET_PKT_H2C_TOTAL_LEN(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 0))
static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
{
SET_PKT_H2C_CATEGORY(h2c_pkt, H2C_PKT_CATEGORY);
SET_PKT_H2C_CMD_ID(h2c_pkt, H2C_PKT_CMD_ID);
SET_PKT_H2C_SUB_CMD_ID(h2c_pkt, sub_id);
}
#define FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 16))
#define GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
#define PHYDM_INFO_SET_REF_TYPE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(7, 0))
#define PHYDM_INFO_SET_RF_TYPE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8))
#define PHYDM_INFO_SET_CUT_VER(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
#define PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24))
#define PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 28))
#define IQK_SET_CLEAR(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0))
#define IQK_SET_SEGMENT_IQK(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
/* Command H2C */
#define H2C_CMD_RSVD_PAGE 0x0
#define H2C_CMD_MEDIA_STATUS_RPT 0x01
#define H2C_CMD_SET_PWR_MODE 0x20
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
#define MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
#define MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
#define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(19, 16))
#define SET_PWR_MODE_SET_SMART_PS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 20))
#define SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
#define SET_PWR_MODE_SET_PORT_ID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 5))
#define SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
#define SET_RSSI_INFO_STBC(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, BIT(1))
#define SET_RA_INFO_MACID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
#define SET_RA_INFO_RATE_ID(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(20, 16))
#define SET_RA_INFO_INIT_RA_LVL(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(22, 21))
#define SET_RA_INFO_SGI_EN(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(23))
#define SET_RA_INFO_BW_MODE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(25, 24))
#define SET_RA_INFO_LDPC(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(26))
#define SET_RA_INFO_NO_UPDATE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(27))
#define SET_RA_INFO_VHT_EN(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(29, 28))
#define SET_RA_INFO_DIS_PT(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(30))
#define SET_RA_INFO_RA_MASK0(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
#define SET_RA_INFO_RA_MASK1(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
#define SET_RA_INFO_RA_MASK2(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
#define SET_RA_INFO_RA_MASK3(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 24))
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
u32 pkt_offset;
pkt_offset = *((u32 *)skb->cb);
return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
}
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
void rtw_fw_send_general_info(struct rtw_dev *rtwdev);
void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
bool txdesc);
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
u8 *buf, u32 size);
void rtw_reset_rsvd_page(struct rtw_dev *rtwdev);
int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev,
struct ieee80211_vif *vif);
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
u32 offset, u32 size, u32 *buf);
#endif
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_HCI_H__
#define __RTW_HCI_H__
/* ops for PCI, USB and SDIO */
struct rtw_hci_ops {
int (*tx)(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb);
int (*setup)(struct rtw_dev *rtwdev);
int (*start)(struct rtw_dev *rtwdev);
void (*stop)(struct rtw_dev *rtwdev);
int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
u8 (*read8)(struct rtw_dev *rtwdev, u32 addr);
u16 (*read16)(struct rtw_dev *rtwdev, u32 addr);
u32 (*read32)(struct rtw_dev *rtwdev, u32 addr);
void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val);
void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val);
void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
};
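/* Each bus backend supplies its own rtw_hci_ops table; this patch set only
 * wires up a PCI backend (pci.c, not shown in this excerpt), and USB/SDIO
 * backends would plug in the same way.  As an illustrative sketch only (the
 * names below are assumptions, not taken from this excerpt), a backend fills
 * the table with its implementations:
 *
 *	static struct rtw_hci_ops rtw_pci_ops = {
 *		.tx = rtw_pci_tx,
 *		.setup = rtw_pci_setup,
 *		.start = rtw_pci_start,
 *		.stop = rtw_pci_stop,
 *		.read32 = rtw_pci_read32,
 *		.write32 = rtw_pci_write32,
 *	};
 *
 * The inline wrappers below simply dispatch through rtwdev->hci.ops.
 */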
static inline int rtw_hci_tx(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb)
{
return rtwdev->hci.ops->tx(rtwdev, pkt_info, skb);
}
static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
{
return rtwdev->hci.ops->setup(rtwdev);
}
static inline int rtw_hci_start(struct rtw_dev *rtwdev)
{
return rtwdev->hci.ops->start(rtwdev);
}
static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
{
rtwdev->hci.ops->stop(rtwdev);
}
static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size);
}
static inline int
rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size);
}
static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr)
{
return rtwdev->hci.ops->read8(rtwdev, addr);
}
static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr)
{
return rtwdev->hci.ops->read16(rtwdev, addr);
}
static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr)
{
return rtwdev->hci.ops->read32(rtwdev, addr);
}
static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
rtwdev->hci.ops->write8(rtwdev, addr, val);
}
static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
rtwdev->hci.ops->write16(rtwdev, addr, val);
}
static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
rtwdev->hci.ops->write32(rtwdev, addr, val);
}
static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
u8 val;
val = rtw_read8(rtwdev, addr);
rtw_write8(rtwdev, addr, val | bit);
}
static inline void rtw_writ16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
u16 val;
val = rtw_read16(rtwdev, addr);
rtw_write16(rtwdev, addr, val | bit);
}
static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
u32 val;
val = rtw_read32(rtwdev, addr);
rtw_write32(rtwdev, addr, val | bit);
}
static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
u8 val;
val = rtw_read8(rtwdev, addr);
rtw_write8(rtwdev, addr, val & ~bit);
}
static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
u16 val;
val = rtw_read16(rtwdev, addr);
rtw_write16(rtwdev, addr, val & ~bit);
}
static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
u32 val;
val = rtw_read32(rtwdev, addr);
rtw_write32(rtwdev, addr, val & ~bit);
}
static inline u32
rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&rtwdev->rf_lock, flags);
val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);
spin_unlock_irqrestore(&rtwdev->rf_lock, flags);
return val;
}
static inline void
rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
unsigned long flags;
spin_lock_irqsave(&rtwdev->rf_lock, flags);
rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
spin_unlock_irqrestore(&rtwdev->rf_lock, flags);
}
static inline u32
rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
u32 shift = __ffs(mask);
u32 orig;
u32 ret;
orig = rtw_read32(rtwdev, addr);
ret = (orig & mask) >> shift;
return ret;
}
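/* rtw_write32_mask() below is a read-modify-write on a register bit field.
 * Worked example (illustrative values only): with mask = GENMASK(11, 8) the
 * shift is 8, so rtw_write32_mask(rtwdev, addr, GENMASK(11, 8), 0x5) rewrites
 * only bits 11..8 of the register to 0x5 and leaves all other bits untouched.
 */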
static inline void
rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
{
u32 shift = __ffs(mask);
u32 orig;
u32 set;
WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);
orig = rtw_read32(rtwdev, addr);
set = (orig & ~mask) | ((data << shift) & mask);
rtw_write32(rtwdev, addr, set);
}
static inline void
rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data)
{
u32 shift;
u8 orig, set;
mask &= 0xff;
shift = __ffs(mask);
orig = rtw_read8(rtwdev, addr);
set = (orig & ~mask) | ((data << shift) & mask);
rtw_write8(rtwdev, addr, set);
}
static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
{
return rtwdev->hci.type;
}
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"
void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
u8 txsc40 = 0, txsc20 = 0;
u32 value32;
u8 value8;
txsc20 = primary_ch_idx;
if (txsc20 == 1 || txsc20 == 3)
txsc40 = 9;
else
txsc40 = 10;
rtw_write8(rtwdev, REG_DATA_SC,
BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
value32 &= ~BIT_RFMOD;
switch (bw) {
case RTW_CHANNEL_WIDTH_80:
value32 |= BIT_RFMOD_80M;
break;
case RTW_CHANNEL_WIDTH_40:
value32 |= BIT_RFMOD_40M;
break;
case RTW_CHANNEL_WIDTH_20:
default:
break;
}
rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);
value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
rtw_write32(rtwdev, REG_AFE_CTRL1, value32);
rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);
value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
value8 = value8 & ~BIT_CHECK_CCK_EN;
if (channel > 35)
value8 |= BIT_CHECK_CCK_EN;
rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
u32 value32;
u8 value8;
rtw_write8(rtwdev, REG_RSV_CTRL, 0);
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
break;
case RTW_HCI_TYPE_USB:
break;
default:
return -EINVAL;
}
/* config PIN Mux */
value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
rtw_write32(rtwdev, REG_PAD_CTRL1, value32);
value32 = rtw_read32(rtwdev, REG_LED_CFG);
value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
rtw_write32(rtwdev, REG_LED_CFG, value32);
value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
value32 |= BIT_WLRFE_4_5_EN;
rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);
/* disable BB/RF */
value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);
value8 = rtw_read8(rtwdev, REG_RF_CTRL);
value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
rtw_write8(rtwdev, REG_RF_CTRL, value8);
value32 = rtw_read32(rtwdev, REG_WLRF1);
value32 &= ~BIT_WLRF1_BBRF_EN;
rtw_write32(rtwdev, REG_WLRF1, value32);
return 0;
}
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
struct rtw_pwr_seq_cmd *cmd)
{
u8 value;
u8 flag = 0;
u32 offset;
u32 cnt = RTW_PWR_POLLING_CNT;
if (cmd->base == RTW_PWR_ADDR_SDIO)
offset = cmd->offset | SDIO_LOCAL_OFFSET;
else
offset = cmd->offset;
do {
cnt--;
value = rtw_read8(rtwdev, offset);
value &= cmd->mask;
if (value == (cmd->value & cmd->mask))
return 0;
if (cnt == 0) {
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
flag == 0) {
value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
value |= BIT(3);
rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
value &= ~BIT(3);
rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
cnt = RTW_PWR_POLLING_CNT;
flag = 1;
} else {
return -EBUSY;
}
} else {
udelay(50);
}
} while (1);
}
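/* Walk one power-sequence command table: entries that do not match the
 * current bus (intf_mask) or chip cut (cut_mask) are skipped; the rest are
 * masked register writes, polls with retry, or fixed delays, and the table
 * is terminated by RTW_PWR_CMD_END.
 */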
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
u8 cut_mask, struct rtw_pwr_seq_cmd *cmd)
{
struct rtw_pwr_seq_cmd *cur_cmd;
u32 offset;
u8 value;
for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
if (!(cur_cmd->intf_mask & intf_mask) ||
!(cur_cmd->cut_mask & cut_mask))
continue;
switch (cur_cmd->cmd) {
case RTW_PWR_CMD_WRITE:
offset = cur_cmd->offset;
if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
offset |= SDIO_LOCAL_OFFSET;
value = rtw_read8(rtwdev, offset);
value &= ~cur_cmd->mask;
value |= (cur_cmd->value & cur_cmd->mask);
rtw_write8(rtwdev, offset, value);
break;
case RTW_PWR_CMD_POLLING:
if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
return -EBUSY;
break;
case RTW_PWR_CMD_DELAY:
if (cur_cmd->value == RTW_PWR_DELAY_US)
udelay(cur_cmd->offset);
else
mdelay(cur_cmd->offset);
break;
case RTW_PWR_CMD_READ:
break;
default:
return -EINVAL;
}
}
return 0;
}
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
struct rtw_pwr_seq_cmd **cmd_seq)
{
u8 cut_mask;
u8 intf_mask;
u8 cut;
u32 idx = 0;
struct rtw_pwr_seq_cmd *cmd;
int ret;
cut = rtwdev->hal.cut_version;
cut_mask = cut_version_to_mask(cut);
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
intf_mask = BIT(2);
break;
case RTW_HCI_TYPE_USB:
intf_mask = BIT(1);
break;
default:
return -EINVAL;
}
do {
cmd = cmd_seq[idx];
if (!cmd)
break;
ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
if (ret)
return -EBUSY;
idx++;
} while (1);
return 0;
}
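/* Run the chip's power-on or power-off command sequence. The current
 * power state is inferred from REG_CR (reads as 0xea when the MAC is
 * off); powering on an already powered MAC returns -EALREADY.
 */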
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
/* Check whether the firmware is still alive */
if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
}
if (rtw_read8(rtwdev, REG_CR) == 0xea)
cur_pwr = false;
else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
(rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
cur_pwr = false;
else
cur_pwr = true;
if (pwr_on && cur_pwr)
return -EALREADY;
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
return -EINVAL;
return 0;
}
static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
u8 sys_func_en = rtwdev->chip->sys_func_en;
u8 value8;
u32 value, tmp;
value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);
rtw_write8(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
rtw_write8(rtwdev, REG_CR_EXT + 3, value8);
/* disable boot-from-flash for driver's DL FW */
tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
if (tmp & BIT_BOOT_FSPI_EN) {
rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
}
return 0;
}
int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
int ret = 0;
ret = rtw_mac_pre_system_cfg(rtwdev);
if (ret)
goto err;
ret = rtw_mac_power_switch(rtwdev, true);
if (ret)
goto err;
ret = rtw_mac_init_system_cfg(rtwdev);
if (ret)
goto err;
return 0;
err:
rtw_err(rtwdev, "mac power on failed");
return ret;
}
void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
rtw_mac_power_switch(rtwdev, false);
}
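/* Validate the firmware image length: it must equal the header size
 * plus the DMEM/IMEM (and optional EMEM) segment sizes declared in the
 * header, each padded with a per-segment checksum.
 */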
static bool check_firmware_size(const u8 *data, u32 size)
{
u32 dmem_size;
u32 imem_size;
u32 emem_size;
u32 real_size;
dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
dmem_size += FW_HDR_CHKSUM_SIZE;
imem_size += FW_HDR_CHKSUM_SIZE;
emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
if (real_size != size)
return false;
return true;
}
static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
if (enable) {
/* cpu io interface enable */
rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
/* cpu enable */
rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
} else {
/* cpu disable */
rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
/* cpu io interface disable */
rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
}
}
#define DLFW_RESTORE_REG_NUM 6
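/* Back up the registers touched during firmware download (TXDMA queue
 * mapping, REG_CR, H2C queue CSR, FIFO page info, RQPN control and
 * beacon control), then reprogram them so the download goes through
 * the high-priority queue with beacon functions disabled.
 */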
static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
struct rtw_backup_info *bckp)
{
u8 tmp;
u8 bckp_idx = 0;
/* set HIQ to hi priority */
bckp[bckp_idx].len = 1;
bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
bckp_idx++;
tmp = RTW_DMA_MAPPING_HIGH << 6;
rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);
/* DLFW uses only HIQ, so map HIQ to high priority */
bckp[bckp_idx].len = 1;
bckp[bckp_idx].reg = REG_CR;
bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
bckp_idx++;
bckp[bckp_idx].len = 4;
bckp[bckp_idx].reg = REG_H2CQ_CSR;
bckp[bckp_idx].val = BIT_H2CQ_FULL;
bckp_idx++;
tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
rtw_write8(rtwdev, REG_CR, tmp);
rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
/* Config hi priority queue and public priority queue page number */
bckp[bckp_idx].len = 2;
bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
bckp_idx++;
bckp[bckp_idx].len = 4;
bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
bckp_idx++;
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);
/* Disable beacon related functions */
tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
bckp[bckp_idx].len = 1;
bckp[bckp_idx].reg = REG_BCN_CTRL;
bckp[bckp_idx].val = tmp;
bckp_idx++;
tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
rtw_write8(rtwdev, REG_BCN_CTRL, tmp);
WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}
static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}
static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
struct rtw_backup_info *bckp,
u8 bckp_num)
{
rtw_restore_reg(rtwdev, bckp, bckp_num);
}
#define TX_DESC_SIZE 48
static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
const u8 *data, u32 size)
{
u8 *buf;
int ret;
buf = kmemdup(data, size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
kfree(buf);
return ret;
}
static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
int ret;
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
!((size + TX_DESC_SIZE) & (512 - 1)))
size += 1;
ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
if (ret)
rtw_err(rtwdev, "failed to download rsvd page\n");
return ret;
}
static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);
if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
return -EBUSY;
return 0;
}
static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
u32 len, u8 first)
{
u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;
if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
return -EBUSY;
ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
if (!first)
ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;
if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
return -EBUSY;
return 0;
}
static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
u8 fw_ctrl;
fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);
if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
if (addr < OCPBASE_DMEM_88XX) {
fw_ctrl |= BIT_IMEM_DW_OK;
fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
} else {
fw_ctrl |= BIT_DMEM_DW_OK;
fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}
rtw_err(rtwdev, "invalid fw checksum\n");
return false;
}
if (addr < OCPBASE_DMEM_88XX) {
fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
} else {
fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}
return true;
}
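/* Download one firmware segment: reset the DDMA checksum status, split
 * the segment into chunks of at most 4 KB, send each chunk to the TX
 * buffer as a reserved page and DDMA it to the target MCU memory
 * address, then verify the accumulated checksum.
 */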
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
u32 src, u32 dst, u32 size)
{
struct rtw_chip_info *chip = rtwdev->chip;
u32 desc_size = chip->tx_pkt_desc_sz;
u8 first_part;
u32 mem_offset;
u32 residue_size;
u32 pkt_size;
u32 max_size = 0x1000;
u32 val;
int ret;
mem_offset = 0;
first_part = 1;
residue_size = size;
val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
val |= BIT_DDMACH0_RESET_CHKSUM_STS;
rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);
while (residue_size) {
if (residue_size >= max_size)
pkt_size = max_size;
else
pkt_size = residue_size;
ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
data + mem_offset, pkt_size);
if (ret)
return ret;
ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
src + desc_size,
dst + mem_offset, pkt_size,
first_part);
if (ret)
return ret;
first_part = 0;
mem_offset += pkt_size;
residue_size -= pkt_size;
}
if (!check_fw_checksum(rtwdev, dst))
return -EINVAL;
return 0;
}
static void update_firmware_info(struct rtw_dev *rtwdev,
struct rtw_fw_state *fw)
{
const u8 *data = fw->firmware->data;
fw->h2c_version =
le16_to_cpu(*((__le16 *)(data + FW_HDR_H2C_FMT_VER)));
fw->version =
le16_to_cpu(*((__le16 *)(data + FW_HDR_VERSION)));
fw->sub_version = *(data + FW_HDR_SUBVERSION);
fw->sub_index = *(data + FW_HDR_SUBINDEX);
rtw_dbg(rtwdev, RTW_DBG_FW, "fw h2c version: %x\n", fw->h2c_version);
rtw_dbg(rtwdev, RTW_DBG_FW, "fw version: %x\n", fw->version);
rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub version: %x\n", fw->sub_version);
rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub index: %x\n", fw->sub_index);
}
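/* Parse segment sizes and load addresses from the firmware header,
 * enable MCU firmware download, and download the DMEM, IMEM and
 * optional EMEM segments in turn.
 */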
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
const u8 *cur_fw;
u16 val;
u32 imem_size;
u32 dmem_size;
u32 emem_size;
u32 addr;
int ret;
dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
dmem_size += FW_HDR_CHKSUM_SIZE;
imem_size += FW_HDR_CHKSUM_SIZE;
emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
val |= BIT_MCUFWDL_EN;
rtw_write16(rtwdev, REG_MCUFW_CTRL, val);
cur_fw = data + FW_HDR_SIZE;
addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_ADDR)));
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
if (ret)
return ret;
cur_fw = data + FW_HDR_SIZE + dmem_size;
addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_ADDR)));
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
if (ret)
return ret;
if (emem_size) {
cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_ADDR)));
addr &= ~BIT(31);
ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
emem_size);
if (ret)
return ret;
}
return 0;
}
static int download_firmware_validate(struct rtw_dev *rtwdev)
{
u32 fw_key;
if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
if (fw_key == ILLEGAL_KEY_GROUP)
rtw_err(rtwdev, "invalid fw key\n");
return -EINVAL;
}
return 0;
}
static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
u16 fw_ctrl;
rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);
/* Check IMEM & DMEM checksum is OK or not */
fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
return;
fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
const u8 *data = fw->firmware->data;
u32 size = fw->firmware->size;
u32 ltecoex_bckp;
int ret;
if (!check_firmware_size(data, size))
return -EINVAL;
if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
return -EBUSY;
wlan_cpu_enable(rtwdev, false);
download_firmware_reg_backup(rtwdev, bckp);
download_firmware_reset_platform(rtwdev);
ret = start_download_firmware(rtwdev, data, size);
if (ret)
goto dlfw_fail;
download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);
download_firmware_end_flow(rtwdev);
wlan_cpu_enable(rtwdev, true);
if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
return -EBUSY;
ret = download_firmware_validate(rtwdev);
if (ret)
goto dlfw_fail;
update_firmware_info(rtwdev, fw);
/* reset desc and index */
rtw_hci_setup(rtwdev);
rtwdev->h2c.last_box_num = 0;
rtwdev->h2c.seq = 0;
rtw_fw_send_general_info(rtwdev);
rtw_fw_send_phydm_info(rtwdev);
rtw_flag_set(rtwdev, RTW_FLAG_FW_RUNNING);
return 0;
dlfw_fail:
/* Disable FWDL_EN */
rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
return ret;
}
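/* Map the TX DMA queues (HI/MG/BK/BE/VI/VO) to DMA priorities using
 * the chip's RQPN table selected by HCI type (and, for USB, by the
 * number of bulk-out endpoints), then re-enable MAC TX/RX.
 */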
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_rqpn *rqpn = NULL;
u16 txdma_pq_map = 0;
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rqpn = &chip->rqpn_table[1];
break;
case RTW_HCI_TYPE_USB:
if (rtwdev->hci.bulkout_num == 2)
rqpn = &chip->rqpn_table[2];
else if (rtwdev->hci.bulkout_num == 3)
rqpn = &chip->rqpn_table[3];
else if (rtwdev->hci.bulkout_num == 4)
rqpn = &chip->rqpn_table[4];
else
return -EINVAL;
break;
default:
return -EINVAL;
}
txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);
rtw_write8(rtwdev, REG_CR, 0);
rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
return 0;
}
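/* Partition the TX FIFO pages: reserve pages for the CSI buffer, FW TX
 * buffer, CPU instructions, H2C queue, H2C info and the driver from
 * the top of the FIFO downwards; the remaining pages are left for the
 * AC queues. The reserved-page boundary must land exactly on the
 * driver reserved-page address.
 */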
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
struct rtw_chip_info *chip = rtwdev->chip;
u16 cur_pg_addr;
u8 csi_buf_pg_num = chip->csi_buf_pg_num;
/* config rsvd page num */
fifo->rsvd_drv_pg_num = 8;
fifo->txff_pg_num = chip->txff_size >> 7;
fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
RSVD_PG_H2C_EXTRAINFO_NUM +
RSVD_PG_H2C_STATICINFO_NUM +
RSVD_PG_H2CQ_NUM +
RSVD_PG_CPU_INSTRUCTION_NUM +
RSVD_PG_FW_TXBUF_NUM +
csi_buf_pg_num;
if (fifo->rsvd_pg_num > fifo->txff_pg_num)
return -ENOMEM;
fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;
cur_pg_addr = fifo->txff_pg_num;
cur_pg_addr -= csi_buf_pg_num;
fifo->rsvd_csibuf_addr = cur_pg_addr;
cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
fifo->rsvd_cpu_instr_addr = cur_pg_addr;
cur_pg_addr -= RSVD_PG_H2CQ_NUM;
fifo->rsvd_h2cq_addr = cur_pg_addr;
cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
fifo->rsvd_h2c_info_addr = cur_pg_addr;
cur_pg_addr -= fifo->rsvd_drv_pg_num;
fifo->rsvd_drv_addr = cur_pg_addr;
if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
rtw_err(rtwdev, "wrong rsvd driver address\n");
return -EINVAL;
}
return 0;
}
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_page_table *pg_tbl = NULL;
u16 pubq_num;
int ret;
ret = set_trx_fifo_info(rtwdev);
if (ret)
return ret;
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
pg_tbl = &chip->page_table[1];
break;
case RTW_HCI_TYPE_USB:
if (rtwdev->hci.bulkout_num == 2)
pg_tbl = &chip->page_table[2];
else if (rtwdev->hci.bulkout_num == 3)
pg_tbl = &chip->page_table[3];
else if (rtwdev->hci.bulkout_num == 4)
pg_tbl = &chip->page_table[4];
else
return -EINVAL;
break;
default:
return -EINVAL;
}
pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);
rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
return -EBUSY;
rtw_write8(rtwdev, REG_CR + 3, 0);
return 0;
}
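/* Program the H2C queue head/read/tail pointers from the reserved-page
 * layout and verify that the queue's free space equals its size, i.e.
 * that it starts out empty.
 */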
static int init_h2c(struct rtw_dev *rtwdev)
{
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
u8 value8;
u32 value32;
u32 h2cq_addr;
u32 h2cq_size;
u32 h2cq_free;
u32 wp, rp;
h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;
value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
value32 = (value32 & 0xFFFC0000) | h2cq_addr;
rtw_write32(rtwdev, REG_H2C_HEAD, value32);
value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
value32 = (value32 & 0xFFFC0000) | h2cq_addr;
rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);
value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
value32 &= 0xFFFC0000;
value32 |= (h2cq_addr + h2cq_size);
rtw_write32(rtwdev, REG_H2C_TAIL, value32);
value8 = rtw_read8(rtwdev, REG_H2C_INFO);
value8 = (u8)((value8 & 0xFC) | 0x01);
rtw_write8(rtwdev, REG_H2C_INFO, value8);
value8 = rtw_read8(rtwdev, REG_H2C_INFO);
value8 = (u8)((value8 & 0xFB) | 0x04);
rtw_write8(rtwdev, REG_H2C_INFO, value8);
value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
value8 = (u8)((value8 & 0x7f) | 0x80);
rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);
wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;
if (h2cq_size != h2cq_free) {
rtw_err(rtwdev, "H2C queue mismatch\n");
return -EINVAL;
}
return 0;
}
static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
int ret;
ret = txdma_queue_mapping(rtwdev);
if (ret)
return ret;
ret = priority_queue_cfg(rtwdev);
if (ret)
return ret;
ret = init_h2c(rtwdev);
if (ret)
return ret;
return 0;
}
static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
u8 value8;
rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
value8 &= 0xF0;
/* For rxdesc len = 0 issue */
value8 |= 0xF;
rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));
return 0;
}
int rtw_mac_init(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
int ret;
ret = rtw_init_trx_cfg(rtwdev);
if (ret)
return ret;
ret = chip->ops->mac_init(rtwdev);
if (ret)
return ret;
ret = rtw_drv_info_cfg(rtwdev);
if (ret)
return ret;
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_MAC_H__
#define __RTW_MAC_H__
#define RTW_HW_PORT_NUM 5
#define cut_version_to_mask(cut) (0x1 << ((cut) + 1))
#define SDIO_LOCAL_OFFSET 0x10250000
#define DDMA_POLLING_COUNT 1000
#define C2H_PKT_BUF 256
#define PHY_STATUS_SIZE 4
#define ILLEGAL_KEY_GROUP 0xFAAAAA00
/* HW memory address */
#define OCPBASE_TXBUF_88XX 0x18780000
#define OCPBASE_DMEM_88XX 0x00200000
#define OCPBASE_EMEM_88XX 0x00100000
#define RSVD_PG_DRV_NUM 16
#define RSVD_PG_H2C_EXTRAINFO_NUM 24
#define RSVD_PG_H2C_STATICINFO_NUM 8
#define RSVD_PG_H2CQ_NUM 8
#define RSVD_PG_CPU_INSTRUCTION_NUM 0
#define RSVD_PG_FW_TXBUF_NUM 4
void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx);
int rtw_mac_power_on(struct rtw_dev *rtwdev);
void rtw_mac_power_off(struct rtw_dev *rtwdev);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "sec.h"
#include "tx.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "debug.h"
static void rtw_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_tx_pkt_info pkt_info = {0};
if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
goto out;
rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
if (rtw_hci_tx(rtwdev, &pkt_info, skb))
goto out;
return;
out:
ieee80211_free_txskb(hw, skb);
}
static int rtw_ops_start(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
int ret;
mutex_lock(&rtwdev->mutex);
ret = rtw_core_start(rtwdev);
mutex_unlock(&rtwdev->mutex);
return ret;
}
static void rtw_ops_stop(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
mutex_lock(&rtwdev->mutex);
rtw_core_stop(rtwdev);
mutex_unlock(&rtwdev->mutex);
}
static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtw_dev *rtwdev = hw->priv;
int ret = 0;
mutex_lock(&rtwdev->mutex);
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
if (hw->conf.flags & IEEE80211_CONF_IDLE) {
rtw_enter_ips(rtwdev);
} else {
ret = rtw_leave_ips(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to leave idle state\n");
goto out;
}
}
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
rtw_set_channel(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
return ret;
}
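/* Per-port register layout: where the MAC address, BSSID, network type
 * and AID are written for each of the five hardware ports.
 */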
static const struct rtw_vif_port rtw_vif_port[] = {
[0] = {
.mac_addr = {.addr = 0x0610},
.bssid = {.addr = 0x0618},
.net_type = {.addr = 0x0100, .mask = 0x30000},
.aid = {.addr = 0x06a8, .mask = 0x7ff},
},
[1] = {
.mac_addr = {.addr = 0x0700},
.bssid = {.addr = 0x0708},
.net_type = {.addr = 0x0100, .mask = 0xc0000},
.aid = {.addr = 0x0710, .mask = 0x7ff},
},
[2] = {
.mac_addr = {.addr = 0x1620},
.bssid = {.addr = 0x1628},
.net_type = {.addr = 0x1100, .mask = 0x3},
.aid = {.addr = 0x1600, .mask = 0x7ff},
},
[3] = {
.mac_addr = {.addr = 0x1630},
.bssid = {.addr = 0x1638},
.net_type = {.addr = 0x1100, .mask = 0xc},
.aid = {.addr = 0x1604, .mask = 0x7ff},
},
[4] = {
.mac_addr = {.addr = 0x1640},
.bssid = {.addr = 0x1648},
.net_type = {.addr = 0x1100, .mask = 0x30},
.aid = {.addr = 0x1608, .mask = 0x7ff},
},
};
static int rtw_ops_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
enum rtw_net_type net_type;
u32 config = 0;
u8 port = 0;
rtwvif->port = port;
rtwvif->vif = vif;
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
rtwvif->in_lps = false;
rtwvif->conf = &rtw_vif_port[port];
mutex_lock(&rtwdev->mutex);
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
net_type = RTW_NET_AP_MODE;
break;
case NL80211_IFTYPE_ADHOC:
net_type = RTW_NET_AD_HOC;
break;
case NL80211_IFTYPE_STATION:
default:
net_type = RTW_NET_NO_LINK;
break;
}
ether_addr_copy(rtwvif->mac_addr, vif->addr);
config |= PORT_SET_MAC_ADDR;
rtwvif->net_type = net_type;
config |= PORT_SET_NET_TYPE;
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
return 0;
}
static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
mutex_lock(&rtwdev->mutex);
eth_zero_addr(rtwvif->mac_addr);
config |= PORT_SET_MAC_ADDR;
rtwvif->net_type = RTW_NET_NO_LINK;
config |= PORT_SET_NET_TYPE;
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
}
static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
u64 multicast)
{
struct rtw_dev *rtwdev = hw->priv;
*new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
FIF_BCN_PRBRESP_PROMISC;
mutex_lock(&rtwdev->mutex);
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
rtwdev->hal.rcr |= BIT_AM | BIT_AB;
else
rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
}
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL)
rtwdev->hal.rcr |= BIT_ACRC32;
else
rtwdev->hal.rcr &= ~(BIT_ACRC32);
}
if (changed_flags & FIF_OTHER_BSS) {
if (*new_flags & FIF_OTHER_BSS)
rtwdev->hal.rcr |= BIT_AAP;
else
rtwdev->hal.rcr &= ~(BIT_AAP);
}
if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
rtwdev->hal.rcr &= ~(BIT_CBSSID_BCN | BIT_CBSSID_DATA);
else
rtwdev->hal.rcr |= BIT_CBSSID_BCN;
}
rtw_dbg(rtwdev, RTW_DBG_RX,
"config rx filter, changed=0x%08x, new=0x%08x, rcr=0x%08x\n",
changed_flags, *new_flags, rtwdev->hal.rcr);
rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
mutex_unlock(&rtwdev->mutex);
}
static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
u32 changed)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
mutex_lock(&rtwdev->mutex);
if (changed & BSS_CHANGED_ASSOC) {
struct rtw_chip_info *chip = rtwdev->chip;
enum rtw_net_type net_type;
if (conf->assoc) {
net_type = RTW_NET_MGD_LINKED;
chip->ops->do_iqk(rtwdev);
rtwvif->aid = conf->aid;
rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
rtw_fw_download_rsvd_page(rtwdev, vif);
rtw_send_rsvd_page_h2c(rtwdev);
} else {
net_type = RTW_NET_NO_LINK;
rtwvif->aid = 0;
rtw_reset_rsvd_page(rtwdev);
}
rtwvif->net_type = net_type;
config |= PORT_SET_NET_TYPE;
config |= PORT_SET_AID;
}
if (changed & BSS_CHANGED_BSSID) {
ether_addr_copy(rtwvif->bssid, conf->bssid);
config |= PORT_SET_BSSID;
}
if (changed & BSS_CHANGED_BEACON)
rtw_fw_download_rsvd_page(rtwdev, vif);
rtw_vif_port_config(rtwdev, rtwvif, config);
mutex_unlock(&rtwdev->mutex);
}
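/* Allocate a hardware MAC ID from the bitmap; returns
 * RTW_MAX_MAC_ID_NUM when no free ID is left.
 */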
static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
{
unsigned long mac_id;
mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
if (mac_id < RTW_MAX_MAC_ID_NUM)
set_bit(mac_id, rtwdev->mac_id_map);
return mac_id;
}
static void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
{
clear_bit(mac_id, rtwdev->mac_id_map);
}
static int rtw_ops_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
int ret = 0;
mutex_lock(&rtwdev->mutex);
si->mac_id = rtw_acquire_macid(rtwdev);
if (si->mac_id >= RTW_MAX_MAC_ID_NUM) {
ret = -ENOSPC;
goto out;
}
si->sta = sta;
si->vif = vif;
si->init_ra_lv = 1;
ewma_rssi_init(&si->avg_rssi);
rtw_update_sta_info(rtwdev, si);
rtw_fw_media_status_report(rtwdev, si->mac_id, true);
rtwdev->sta_cnt++;
rtw_info(rtwdev, "sta %pM joined with macid %d\n",
sta->addr, si->mac_id);
out:
mutex_unlock(&rtwdev->mutex);
return ret;
}
static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
mutex_lock(&rtwdev->mutex);
rtw_release_macid(rtwdev, si->mac_id);
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
rtwdev->sta_cnt--;
rtw_info(rtwdev, "sta %pM with macid %d left\n",
sta->addr, si->mac_id);
mutex_unlock(&rtwdev->mutex);
return 0;
}
static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_sec_desc *sec = &rtwdev->sec;
u8 hw_key_type;
u8 hw_key_idx;
int ret = 0;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
hw_key_type = RTW_CAM_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
hw_key_type = RTW_CAM_WEP104;
break;
case WLAN_CIPHER_SUITE_TKIP:
hw_key_type = RTW_CAM_TKIP;
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
break;
case WLAN_CIPHER_SUITE_CCMP:
hw_key_type = RTW_CAM_AES;
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
/* suppress error messages */
return -EOPNOTSUPP;
default:
return -ENOTSUPP;
}
mutex_lock(&rtwdev->mutex);
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
hw_key_idx = rtw_sec_get_free_cam(sec);
} else {
/* multiple interfaces? */
hw_key_idx = key->keyidx;
}
if (hw_key_idx > sec->total_cam_num) {
ret = -ENOSPC;
goto out;
}
switch (cmd) {
case SET_KEY:
/* need sw generated IV */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
key->hw_key_idx = hw_key_idx;
rtw_sec_write_cam(rtwdev, sec, sta, key,
hw_key_type, hw_key_idx);
break;
case DISABLE_KEY:
rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
break;
}
out:
mutex_unlock(&rtwdev->mutex);
return ret;
}
static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
struct ieee80211_sta *sta = params->sta;
u16 tid = params->tid;
switch (params->action) {
case IEEE80211_AMPDU_TX_START:
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
break;
default:
WARN_ON(1);
return -ENOTSUPP;
}
return 0;
}
static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
{
struct rtw_dev *rtwdev = hw->priv;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
rtw_leave_lps(rtwdev, rtwvif);
rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
}
static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct rtw_dev *rtwdev = hw->priv;
rtw_flag_clear(rtwdev, RTW_FLAG_SCANNING);
rtw_flag_clear(rtwdev, RTW_FLAG_DIG_DISABLE);
}
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.start = rtw_ops_start,
.stop = rtw_ops_stop,
.config = rtw_ops_config,
.add_interface = rtw_ops_add_interface,
.remove_interface = rtw_ops_remove_interface,
.configure_filter = rtw_ops_configure_filter,
.bss_info_changed = rtw_ops_bss_info_changed,
.sta_add = rtw_ops_sta_add,
.sta_remove = rtw_ops_sta_remove,
.set_key = rtw_ops_set_key,
.ampdu_action = rtw_ops_ampdu_action,
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
};
EXPORT_SYMBOL(rtw_ops);
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "regd.h"
#include "fw.h"
#include "ps.h"
#include "sec.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "efuse.h"
#include "debug.h"
static bool rtw_fw_support_lps;
unsigned int rtw_debug_mask;
EXPORT_SYMBOL(rtw_debug_mask);
module_param_named(support_lps, rtw_fw_support_lps, bool, 0644);
module_param_named(debug_mask, rtw_debug_mask, uint, 0644);
MODULE_PARM_DESC(support_lps, "Set Y to enable LPS support");
MODULE_PARM_DESC(debug_mask, "Debugging mask");
static struct ieee80211_channel rtw_channeltable_2g[] = {
{.center_freq = 2412, .hw_value = 1,},
{.center_freq = 2417, .hw_value = 2,},
{.center_freq = 2422, .hw_value = 3,},
{.center_freq = 2427, .hw_value = 4,},
{.center_freq = 2432, .hw_value = 5,},
{.center_freq = 2437, .hw_value = 6,},
{.center_freq = 2442, .hw_value = 7,},
{.center_freq = 2447, .hw_value = 8,},
{.center_freq = 2452, .hw_value = 9,},
{.center_freq = 2457, .hw_value = 10,},
{.center_freq = 2462, .hw_value = 11,},
{.center_freq = 2467, .hw_value = 12,},
{.center_freq = 2472, .hw_value = 13,},
{.center_freq = 2484, .hw_value = 14,},
};
static struct ieee80211_channel rtw_channeltable_5g[] = {
{.center_freq = 5180, .hw_value = 36,},
{.center_freq = 5200, .hw_value = 40,},
{.center_freq = 5220, .hw_value = 44,},
{.center_freq = 5240, .hw_value = 48,},
{.center_freq = 5260, .hw_value = 52,},
{.center_freq = 5280, .hw_value = 56,},
{.center_freq = 5300, .hw_value = 60,},
{.center_freq = 5320, .hw_value = 64,},
{.center_freq = 5500, .hw_value = 100,},
{.center_freq = 5520, .hw_value = 104,},
{.center_freq = 5540, .hw_value = 108,},
{.center_freq = 5560, .hw_value = 112,},
{.center_freq = 5580, .hw_value = 116,},
{.center_freq = 5600, .hw_value = 120,},
{.center_freq = 5620, .hw_value = 124,},
{.center_freq = 5640, .hw_value = 128,},
{.center_freq = 5660, .hw_value = 132,},
{.center_freq = 5680, .hw_value = 136,},
{.center_freq = 5700, .hw_value = 140,},
{.center_freq = 5745, .hw_value = 149,},
{.center_freq = 5765, .hw_value = 153,},
{.center_freq = 5785, .hw_value = 157,},
{.center_freq = 5805, .hw_value = 161,},
{.center_freq = 5825, .hw_value = 165,
.flags = IEEE80211_CHAN_NO_HT40MINUS},
};
static struct ieee80211_rate rtw_ratetable[] = {
{.bitrate = 10, .hw_value = 0x00,},
{.bitrate = 20, .hw_value = 0x01,},
{.bitrate = 55, .hw_value = 0x02,},
{.bitrate = 110, .hw_value = 0x03,},
{.bitrate = 60, .hw_value = 0x04,},
{.bitrate = 90, .hw_value = 0x05,},
{.bitrate = 120, .hw_value = 0x06,},
{.bitrate = 180, .hw_value = 0x07,},
{.bitrate = 240, .hw_value = 0x08,},
{.bitrate = 360, .hw_value = 0x09,},
{.bitrate = 480, .hw_value = 0x0a,},
{.bitrate = 540, .hw_value = 0x0b,},
};
static struct ieee80211_supported_band rtw_band_2ghz = {
.band = NL80211_BAND_2GHZ,
.channels = rtw_channeltable_2g,
.n_channels = ARRAY_SIZE(rtw_channeltable_2g),
.bitrates = rtw_ratetable,
.n_bitrates = ARRAY_SIZE(rtw_ratetable),
.ht_cap = {0},
.vht_cap = {0},
};
static struct ieee80211_supported_band rtw_band_5ghz = {
.band = NL80211_BAND_5GHZ,
.channels = rtw_channeltable_5g,
.n_channels = ARRAY_SIZE(rtw_channeltable_5g),
/* 5G has no CCK rates */
.bitrates = rtw_ratetable + 4,
.n_bitrates = ARRAY_SIZE(rtw_ratetable) - 4,
.ht_cap = {0},
.vht_cap = {0},
};
struct rtw_watch_dog_iter_data {
struct rtw_vif *rtwvif;
bool active;
u8 assoc_cnt;
};
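/* Watchdog iterator: count associated station interfaces, mark the
 * device active if a non-station interface exists or the per-vif
 * traffic exceeded RTW_LPS_THRESHOLD, and clear the traffic counters
 * for the next period.
 */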
static void rtw_vif_watch_dog_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct rtw_watch_dog_iter_data *iter_data = data;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
if (vif->type == NL80211_IFTYPE_STATION) {
if (vif->bss_conf.assoc) {
iter_data->assoc_cnt++;
iter_data->rtwvif = rtwvif;
}
if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD ||
rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
iter_data->active = true;
} else {
/* only station mode can enter LPS; other interface types keep the device active */
iter_data->active = true;
}
rtwvif->stats.tx_unicast = 0;
rtwvif->stats.rx_unicast = 0;
rtwvif->stats.tx_cnt = 0;
rtwvif->stats.rx_cnt = 0;
}
/* Process TX/RX statistics periodically for the hardware;
 * the information helps the hardware enhance its performance.
 */
static void rtw_watch_dog_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
watch_dog_work.work);
struct rtw_watch_dog_iter_data data = {};
if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
return;
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
/* reset tx/rx statistics */
rtwdev->stats.tx_unicast = 0;
rtwdev->stats.rx_unicast = 0;
rtwdev->stats.tx_cnt = 0;
rtwdev->stats.rx_cnt = 0;
rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data);
/* The firmware supports LPS only with a single associated station
 * interface; if two or more interfaces are associated, LPS cannot be
 * entered, because the firmware does not handle overlapping beacon
 * intervals.
 */
if (rtw_fw_support_lps &&
data.rtwvif && !data.active && data.assoc_cnt == 1)
rtw_enter_lps(rtwdev, data.rtwvif);
if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
return;
rtw_phy_dynamic_mechanism(rtwdev);
rtwdev->watch_dog_cnt++;
}
static void rtw_c2h_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work);
struct sk_buff *skb, *tmp;
skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
skb_unlink(skb, &rtwdev->c2h_queue);
rtw_fw_c2h_cmd_handle(rtwdev, skb);
dev_kfree_skb_any(skb);
}
}
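/* Translate a cfg80211 chandef into driver channel parameters. For
 * 40 MHz the center channel is offset by +/-2 from the primary
 * channel; for 80 MHz by +/-2 or +/-6, depending on which 20 MHz
 * sub-channel is the primary one.
 */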
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW_CHANNEL_WIDTH_20;
u8 primary_chan_idx = 0;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
center_freq = chandef->center_freq1;
switch (width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW_CHANNEL_WIDTH_20;
primary_chan_idx = 0;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW_CHANNEL_WIDTH_40;
if (primary_freq > center_freq) {
primary_chan_idx = 1;
center_chan -= 2;
} else {
primary_chan_idx = 2;
center_chan += 2;
}
break;
case NL80211_CHAN_WIDTH_80:
bandwidth = RTW_CHANNEL_WIDTH_80;
if (primary_freq > center_freq) {
if (primary_freq - center_freq == 10) {
primary_chan_idx = 1;
center_chan -= 2;
} else {
primary_chan_idx = 3;
center_chan -= 6;
}
} else {
if (center_freq - primary_freq == 10) {
primary_chan_idx = 2;
center_chan += 2;
} else {
primary_chan_idx = 4;
center_chan += 6;
}
}
break;
default:
center_chan = 0;
break;
}
chan_params->center_chan = center_chan;
chan_params->bandwidth = bandwidth;
chan_params->primary_chan_idx = primary_chan_idx;
}
void rtw_set_channel(struct rtw_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_channel_params ch_param;
u8 center_chan, bandwidth, primary_chan_idx;
rtw_get_channel_params(&hw->conf.chandef, &ch_param);
if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
return;
center_chan = ch_param.center_chan;
bandwidth = ch_param.bandwidth;
primary_chan_idx = ch_param.primary_chan_idx;
hal->current_band_width = bandwidth;
hal->current_channel = center_chan;
hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
rtw_phy_set_tx_power_level(rtwdev, center_chan);
}
static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr)
{
int i;
for (i = 0; i < ETH_ALEN; i++)
rtw_write8(rtwdev, start + i, addr[i]);
}
void rtw_vif_port_config(struct rtw_dev *rtwdev,
struct rtw_vif *rtwvif,
u32 config)
{
u32 addr, mask;
if (config & PORT_SET_MAC_ADDR) {
addr = rtwvif->conf->mac_addr.addr;
rtw_vif_write_addr(rtwdev, addr, rtwvif->mac_addr);
}
if (config & PORT_SET_BSSID) {
addr = rtwvif->conf->bssid.addr;
rtw_vif_write_addr(rtwdev, addr, rtwvif->bssid);
}
if (config & PORT_SET_NET_TYPE) {
addr = rtwvif->conf->net_type.addr;
mask = rtwvif->conf->net_type.mask;
rtw_write32_mask(rtwdev, addr, mask, rtwvif->net_type);
}
if (config & PORT_SET_AID) {
addr = rtwvif->conf->aid.addr;
mask = rtwvif->conf->aid.mask;
rtw_write32_mask(rtwdev, addr, mask, rtwvif->aid);
}
}
static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
{
u8 bw = 0;
switch (bw_cap) {
case EFUSE_HW_CAP_IGNORE:
case EFUSE_HW_CAP_SUPP_BW80:
bw |= BIT(RTW_CHANNEL_WIDTH_80);
/* fall through */
case EFUSE_HW_CAP_SUPP_BW40:
bw |= BIT(RTW_CHANNEL_WIDTH_40);
/* fall through */
default:
bw |= BIT(RTW_CHANNEL_WIDTH_20);
break;
}
return bw;
}
static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
{
struct rtw_hal *hal = &rtwdev->hal;
if (hw_ant_num == EFUSE_HW_CAP_IGNORE ||
hw_ant_num >= hal->rf_path_num)
return;
switch (hw_ant_num) {
case 1:
hal->rf_type = RF_1T1R;
hal->rf_path_num = 1;
hal->antenna_tx = BB_PATH_A;
hal->antenna_rx = BB_PATH_A;
break;
default:
WARN(1, "invalid hw configuration from efuse\n");
break;
}
}
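/* Build the VHT portion of the rate adaptive mask from the station's
 * RX MCS map: 10 bits per spatial stream starting at bit 12, enabling
 * MCS0-7, MCS0-8 or MCS0-9 according to the 2-bit capability field of
 * each stream.
 */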
static u64 get_vht_ra_mask(struct ieee80211_sta *sta)
{
u64 ra_mask = 0;
u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
u8 vht_mcs_cap;
int i, nss;
/* 4SS, every two bits for MCS7/8/9 */
for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 10) {
vht_mcs_cap = mcs_map & 0x3;
switch (vht_mcs_cap) {
case 2: /* MCS9 */
ra_mask |= 0x3ff << nss;
break;
case 1: /* MCS8 */
ra_mask |= 0x1ff << nss;
break;
case 0: /* MCS7 */
ra_mask |= 0x0ff << nss;
break;
default:
break;
}
}
return ra_mask;
}
static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
{
u8 rate_id = 0;
switch (wireless_set) {
case WIRELESS_CCK:
rate_id = RTW_RATEID_B_20M;
break;
case WIRELESS_OFDM:
rate_id = RTW_RATEID_G;
break;
case WIRELESS_CCK | WIRELESS_OFDM:
rate_id = RTW_RATEID_BG;
break;
case WIRELESS_OFDM | WIRELESS_HT:
if (tx_num == 1)
rate_id = RTW_RATEID_GN_N1SS;
else if (tx_num == 2)
rate_id = RTW_RATEID_GN_N2SS;
else if (tx_num == 3)
rate_id = RTW_RATEID_ARFR5_N_3SS;
break;
case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT:
if (bw_mode == RTW_CHANNEL_WIDTH_40) {
if (tx_num == 1)
rate_id = RTW_RATEID_BGN_40M_1SS;
else if (tx_num == 2)
rate_id = RTW_RATEID_BGN_40M_2SS;
else if (tx_num == 3)
rate_id = RTW_RATEID_ARFR5_N_3SS;
else if (tx_num == 4)
rate_id = RTW_RATEID_ARFR7_N_4SS;
} else {
if (tx_num == 1)
rate_id = RTW_RATEID_BGN_20M_1SS;
else if (tx_num == 2)
rate_id = RTW_RATEID_BGN_20M_2SS;
else if (tx_num == 3)
rate_id = RTW_RATEID_ARFR5_N_3SS;
else if (tx_num == 4)
rate_id = RTW_RATEID_ARFR7_N_4SS;
}
break;
case WIRELESS_OFDM | WIRELESS_VHT:
if (tx_num == 1)
rate_id = RTW_RATEID_ARFR1_AC_1SS;
else if (tx_num == 2)
rate_id = RTW_RATEID_ARFR0_AC_2SS;
else if (tx_num == 3)
rate_id = RTW_RATEID_ARFR4_AC_3SS;
else if (tx_num == 4)
rate_id = RTW_RATEID_ARFR6_AC_4SS;
break;
case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_VHT:
if (bw_mode >= RTW_CHANNEL_WIDTH_80) {
if (tx_num == 1)
rate_id = RTW_RATEID_ARFR1_AC_1SS;
else if (tx_num == 2)
rate_id = RTW_RATEID_ARFR0_AC_2SS;
else if (tx_num == 3)
rate_id = RTW_RATEID_ARFR4_AC_3SS;
else if (tx_num == 4)
rate_id = RTW_RATEID_ARFR6_AC_4SS;
} else {
if (tx_num == 1)
rate_id = RTW_RATEID_ARFR2_AC_2G_1SS;
else if (tx_num == 2)
rate_id = RTW_RATEID_ARFR3_AC_2G_2SS;
else if (tx_num == 3)
rate_id = RTW_RATEID_ARFR4_AC_3SS;
else if (tx_num == 4)
rate_id = RTW_RATEID_ARFR6_AC_4SS;
}
break;
default:
break;
}
return rate_id;
}
#define RA_MASK_CCK_RATES 0x0000f
#define RA_MASK_OFDM_RATES 0x00ff0
#define RA_MASK_HT_RATES_1SS (0xff000 << 0)
#define RA_MASK_HT_RATES_2SS (0xff000 << 8)
#define RA_MASK_HT_RATES_3SS (0xff000 << 16)
#define RA_MASK_HT_RATES (RA_MASK_HT_RATES_1SS | \
RA_MASK_HT_RATES_2SS | \
RA_MASK_HT_RATES_3SS)
#define RA_MASK_VHT_RATES_1SS (0x3ff000 << 0)
#define RA_MASK_VHT_RATES_2SS (0x3ff000 << 10)
#define RA_MASK_VHT_RATES_3SS (0x3ff000 << 20)
#define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \
RA_MASK_VHT_RATES_2SS | \
RA_MASK_VHT_RATES_3SS)
#define RA_MASK_CCK_IN_HT 0x00005
#define RA_MASK_CCK_IN_VHT 0x00005
#define RA_MASK_OFDM_IN_VHT 0x00010
#define RA_MASK_OFDM_IN_HT_2G 0x00010
#define RA_MASK_OFDM_IN_HT_5G 0x00030
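/* Refresh rate adaptive info for a station: compose the rate mask
 * (bits 0-3 CCK, 4-11 OFDM, HT/VHT rates above) from its capabilities
 * and the current band, derive the wireless mode set, bandwidth,
 * STBC/LDPC/SGI settings and rate ID, trim the mask by RSSI level and
 * hand everything to the firmware.
 */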
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
struct ieee80211_sta *sta = si->sta;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_hal *hal = &rtwdev->hal;
u8 rssi_level;
u8 wireless_set;
u8 bw_mode;
u8 rate_id;
u8 rf_type = RF_1T1R;
u8 stbc_en = 0;
u8 ldpc_en = 0;
u8 tx_num = 1;
u64 ra_mask = 0;
bool is_vht_enable = false;
bool is_support_sgi = false;
if (sta->vht_cap.vht_supported) {
is_vht_enable = true;
ra_mask |= get_vht_ra_mask(sta);
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
stbc_en = VHT_STBC_EN;
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = VHT_LDPC_EN;
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
is_support_sgi = true;
} else if (sta->ht_cap.ht_supported) {
ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) |
(sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12);
if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
ldpc_en = HT_LDPC_EN;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 ||
sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
is_support_sgi = true;
}
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
if (sta->vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_OFDM | WIRELESS_VHT;
} else if (sta->ht_cap.ht_supported) {
ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G;
wireless_set = WIRELESS_OFDM | WIRELESS_HT;
} else {
wireless_set = WIRELESS_OFDM;
}
} else if (hal->current_band_type == RTW_BAND_2G) {
ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
if (sta->vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT |
RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
WIRELESS_HT | WIRELESS_VHT;
} else if (sta->ht_cap.ht_supported) {
ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT |
RA_MASK_OFDM_IN_HT_2G;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
WIRELESS_HT;
} else if (sta->supp_rates[0] <= 0xf) {
wireless_set = WIRELESS_CCK;
} else {
wireless_set = WIRELESS_CCK | WIRELESS_OFDM;
}
} else {
rtw_err(rtwdev, "Unknown band type\n");
wireless_set = 0;
}
if (efuse->hw_cap.nss == 1) {
ra_mask &= RA_MASK_VHT_RATES_1SS;
ra_mask &= RA_MASK_HT_RATES_1SS;
}
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_80:
bw_mode = RTW_CHANNEL_WIDTH_80;
break;
case IEEE80211_STA_RX_BW_40:
bw_mode = RTW_CHANNEL_WIDTH_40;
break;
default:
bw_mode = RTW_CHANNEL_WIDTH_20;
break;
}
if (sta->vht_cap.vht_supported && ra_mask & 0xffc00000) {
tx_num = 2;
rf_type = RF_2T2R;
} else if (sta->ht_cap.ht_supported && ra_mask & 0xfff00000) {
tx_num = 2;
rf_type = RF_2T2R;
}
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
if (wireless_set != WIRELESS_CCK) {
rssi_level = si->rssi_level;
if (rssi_level == 0)
ra_mask &= 0xffffffffffffffffULL;
else if (rssi_level == 1)
ra_mask &= 0xfffffffffffffff0ULL;
else if (rssi_level == 2)
ra_mask &= 0xffffffffffffefe0ULL;
else if (rssi_level == 3)
ra_mask &= 0xffffffffffffcfc0ULL;
else if (rssi_level == 4)
ra_mask &= 0xffffffffffff8f80ULL;
else if (rssi_level >= 5)
ra_mask &= 0xffffffffffff0f00ULL;
}
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
si->ldpc_en = ldpc_en;
si->rf_type = rf_type;
si->wireless_set = wireless_set;
si->sgi_enable = is_support_sgi;
si->vht_enable = is_vht_enable;
si->ra_mask = ra_mask;
si->rate_id = rate_id;
rtw_fw_send_ra_info(rtwdev, si);
}
static int rtw_power_on(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw = &rtwdev->fw;
int ret;
ret = rtw_hci_setup(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to setup hci\n");
goto err;
}
/* power on the MAC before downloading firmware */
ret = rtw_mac_power_on(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to power on mac\n");
goto err;
}
wait_for_completion(&fw->completion);
if (!fw->firmware) {
ret = -EINVAL;
rtw_err(rtwdev, "failed to load firmware\n");
goto err;
}
ret = rtw_download_firmware(rtwdev, fw);
if (ret) {
rtw_err(rtwdev, "failed to download firmware\n");
goto err_off;
}
/* configure the MAC after the firmware has been downloaded */
ret = rtw_mac_init(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to configure mac\n");
goto err_off;
}
chip->ops->phy_set_param(rtwdev);
ret = rtw_hci_start(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to start hci\n");
goto err_off;
}
return 0;
err_off:
rtw_mac_power_off(rtwdev);
err:
return ret;
}
int rtw_core_start(struct rtw_dev *rtwdev)
{
int ret;
ret = rtw_power_on(rtwdev);
if (ret)
return ret;
rtw_sec_enable_sec_engine(rtwdev);
/* RCR is reset after power on, restore the configured value */
rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
rtw_flag_set(rtwdev, RTW_FLAG_RUNNING);
return 0;
}
static void rtw_power_off(struct rtw_dev *rtwdev)
{
rtwdev->hci.ops->stop(rtwdev);
rtw_mac_power_off(rtwdev);
}
void rtw_core_stop(struct rtw_dev *rtwdev)
{
rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING);
rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
rtw_power_off(rtwdev);
}
static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_ht_cap *ht_cap)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
ht_cap->ht_supported = true;
ht_cap->cap = 0;
ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_MAX_AMSDU |
IEEE80211_HT_CAP_LDPC_CODING |
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40))
ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_DSSSCCK40 |
IEEE80211_HT_CAP_SGI_40;
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
if (efuse->hw_cap.nss > 1) {
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0xFF;
ht_cap->mcs.rx_mask[4] = 0x01;
ht_cap->mcs.rx_highest = cpu_to_le16(300);
} else {
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0x00;
ht_cap->mcs.rx_mask[4] = 0x01;
ht_cap->mcs.rx_highest = cpu_to_le16(150);
}
}
static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_vht_cap *vht_cap)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
u16 mcs_map;
__le16 highest;
if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE &&
efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT)
return;
vht_cap->vht_supported = true;
vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_HTC_VHT |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
0;
mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
if (efuse->hw_cap.nss > 1) {
highest = cpu_to_le16(780);
mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2;
} else {
highest = cpu_to_le16(390);
mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2;
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.rx_highest = highest;
vht_cap->vht_mcs.tx_highest = highest;
}
static void rtw_set_supported_band(struct ieee80211_hw *hw,
struct rtw_chip_info *chip)
{
struct rtw_dev *rtwdev = hw->priv;
struct ieee80211_supported_band *sband;
if (chip->band & RTW_BAND_2G) {
sband = kmemdup(&rtw_band_2ghz, sizeof(*sband), GFP_KERNEL);
if (!sband)
goto err_out;
if (chip->ht_supported)
rtw_init_ht_cap(rtwdev, &sband->ht_cap);
hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
}
if (chip->band & RTW_BAND_5G) {
sband = kmemdup(&rtw_band_5ghz, sizeof(*sband), GFP_KERNEL);
if (!sband)
goto err_out;
if (chip->ht_supported)
rtw_init_ht_cap(rtwdev, &sband->ht_cap);
if (chip->vht_supported)
rtw_init_vht_cap(rtwdev, &sband->vht_cap);
hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
}
return;
err_out:
rtw_err(rtwdev, "failed to set supported band\n");
kfree(sband);
}
static void rtw_unset_supported_band(struct ieee80211_hw *hw,
struct rtw_chip_info *chip)
{
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
}
static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
{
struct rtw_dev *rtwdev = context;
struct rtw_fw_state *fw = &rtwdev->fw;
if (!firmware)
rtw_err(rtwdev, "failed to request firmware\n");
fw->firmware = firmware;
complete_all(&fw->completion);
}
static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
{
struct rtw_fw_state *fw = &rtwdev->fw;
int ret;
init_completion(&fw->completion);
ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
GFP_KERNEL, rtwdev, rtw_load_firmware_cb);
if (ret) {
rtw_err(rtwdev, "async firmware request failed\n");
return ret;
}
return 0;
}
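/* Read basic chip parameters from hardware: the RPWM address for the
 * bus type, BT coexistence presence, chip/fab/cut version, RF path
 * configuration and the efuse sizes taken from the chip info.
 */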
static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
u32 wl_bt_pwr_ctrl;
int ret = 0;
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtwdev->hci.rpwm_addr = 0x03d9;
break;
default:
rtw_err(rtwdev, "unsupported hci type\n");
return -EINVAL;
}
wl_bt_pwr_ctrl = rtw_read32(rtwdev, REG_WL_BT_PWR_CTRL);
if (wl_bt_pwr_ctrl & BIT_BT_FUNC_EN)
rtwdev->efuse.btcoex = true;
hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1);
hal->fab_version = BIT_GET_VENDOR_ID(hal->chip_version) >> 2;
hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version);
hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 0 : 1;
if (hal->chip_version & BIT_RF_TYPE_ID) {
hal->rf_type = RF_2T2R;
hal->rf_path_num = 2;
hal->antenna_tx = BB_PATH_AB;
hal->antenna_rx = BB_PATH_AB;
} else {
hal->rf_type = RF_1T1R;
hal->rf_path_num = 1;
hal->antenna_tx = BB_PATH_A;
hal->antenna_rx = BB_PATH_A;
}
if (hal->fab_version == 2)
hal->fab_version = 1;
else if (hal->fab_version == 1)
hal->fab_version = 2;
efuse->physical_size = chip->phy_efuse_size;
efuse->logical_size = chip->log_efuse_size;
efuse->protect_size = chip->ptct_efuse_size;
/* use ack by default */
rtwdev->hal.rcr |= BIT_VHT_DACK;
return ret;
}
static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev)
{
struct rtw_fw_state *fw = &rtwdev->fw;
int ret;
ret = rtw_hci_setup(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to setup hci\n");
goto err;
}
ret = rtw_mac_power_on(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to power on mac\n");
goto err;
}
rtw_write8(rtwdev, REG_C2HEVT, C2H_HW_FEATURE_DUMP);
wait_for_completion(&fw->completion);
if (!fw->firmware) {
ret = -EINVAL;
rtw_err(rtwdev, "failed to load firmware\n");
goto err;
}
ret = rtw_download_firmware(rtwdev, fw);
if (ret) {
rtw_err(rtwdev, "failed to download firmware\n");
goto err_off;
}
return 0;
err_off:
rtw_mac_power_off(rtwdev);
err:
return ret;
}
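/* Read the hardware feature report the firmware places in the C2H
 * event registers (bandwidth, HCI, NSS, protocol and antenna number)
 * and derive the supported bandwidths and RF antenna configuration.
 */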
static int rtw_dump_hw_feature(struct rtw_dev *rtwdev)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 hw_feature[HW_FEATURE_LEN];
u8 id;
u8 bw;
int i;
id = rtw_read8(rtwdev, REG_C2HEVT);
if (id != C2H_HW_FEATURE_REPORT) {
rtw_err(rtwdev, "failed to read hw feature report\n");
return -EBUSY;
}
for (i = 0; i < HW_FEATURE_LEN; i++)
hw_feature[i] = rtw_read8(rtwdev, REG_C2HEVT + 2 + i);
rtw_write8(rtwdev, REG_C2HEVT, 0);
bw = GET_EFUSE_HW_CAP_BW(hw_feature);
efuse->hw_cap.bw = hw_bw_cap_to_bitamp(bw);
efuse->hw_cap.hci = GET_EFUSE_HW_CAP_HCI(hw_feature);
efuse->hw_cap.nss = GET_EFUSE_HW_CAP_NSS(hw_feature);
efuse->hw_cap.ptcl = GET_EFUSE_HW_CAP_PTCL(hw_feature);
efuse->hw_cap.ant_num = GET_EFUSE_HW_CAP_ANT_NUM(hw_feature);
rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num);
if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE)
efuse->hw_cap.nss = rtwdev->hal.rf_path_num;
rtw_dbg(rtwdev, RTW_DBG_EFUSE,
"hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n",
efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl,
efuse->hw_cap.ant_num, efuse->hw_cap.nss);
return 0;
}
static void rtw_chip_efuse_disable(struct rtw_dev *rtwdev)
{
rtw_hci_stop(rtwdev);
rtw_mac_power_off(rtwdev);
}
static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
int ret;
mutex_lock(&rtwdev->mutex);
/* power on mac to read efuse */
ret = rtw_chip_efuse_enable(rtwdev);
if (ret)
goto out;
ret = rtw_parse_efuse_map(rtwdev);
if (ret)
goto out;
ret = rtw_dump_hw_feature(rtwdev);
if (ret)
goto out;
ret = rtw_check_supported_rfe(rtwdev);
if (ret)
goto out;
if (efuse->crystal_cap == 0xff)
efuse->crystal_cap = 0;
if (efuse->pa_type_2g == 0xff)
efuse->pa_type_2g = 0;
if (efuse->pa_type_5g == 0xff)
efuse->pa_type_5g = 0;
if (efuse->lna_type_2g == 0xff)
efuse->lna_type_2g = 0;
if (efuse->lna_type_5g == 0xff)
efuse->lna_type_5g = 0;
if (efuse->channel_plan == 0xff)
efuse->channel_plan = 0x7f;
if (efuse->bt_setting & BIT(0))
efuse->share_ant = true;
if (efuse->regd == 0xff)
efuse->regd = 0;
efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0;
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
rtw_chip_efuse_disable(rtwdev);
out:
mutex_unlock(&rtwdev->mutex);
return ret;
}
static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
if (!rfe_def)
return -ENODEV;
rtw_phy_setup_phy_cond(rtwdev, 0);
rtw_hw_init_tx_power(hal);
rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
rtw_phy_tx_power_by_rate_config(hal);
rtw_phy_tx_power_limit_config(hal);
return 0;
}
int rtw_chip_info_setup(struct rtw_dev *rtwdev)
{
int ret;
ret = rtw_chip_parameter_setup(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to setup chip parameters\n");
goto err_out;
}
ret = rtw_chip_efuse_info_setup(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to setup chip efuse info\n");
goto err_out;
}
ret = rtw_chip_board_info_setup(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to setup chip board info\n");
goto err_out;
}
return 0;
err_out:
return ret;
}
EXPORT_SYMBOL(rtw_chip_info_setup);
int rtw_core_init(struct rtw_dev *rtwdev)
{
int ret;
INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
skb_queue_head_init(&rtwdev->c2h_queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
spin_lock_init(&rtwdev->dm_lock);
spin_lock_init(&rtwdev->rf_lock);
spin_lock_init(&rtwdev->h2c.lock);
spin_lock_init(&rtwdev->tx_report.q_lock);
mutex_init(&rtwdev->mutex);
mutex_init(&rtwdev->hal.tx_power_mutex);
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
mutex_lock(&rtwdev->mutex);
rtw_add_rsvd_page(rtwdev, RSVD_BEACON, false);
mutex_unlock(&rtwdev->mutex);
/* default rx filter setting */
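/* Judging by the bit names: the APP_* bits append FCS/MIC/ICV/PHY status to
* the RX buffer, while AB/AM/APM accept broadcast, multicast and
* address-matched unicast frames respectively.
*/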
rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV |
BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
BIT_AB | BIT_AM | BIT_APM;
ret = rtw_load_firmware(rtwdev, rtwdev->chip->fw_name);
if (ret) {
rtw_warn(rtwdev, "no firmware loaded\n");
return ret;
}
return 0;
}
EXPORT_SYMBOL(rtw_core_init);
void rtw_core_deinit(struct rtw_dev *rtwdev)
{
struct rtw_fw_state *fw = &rtwdev->fw;
struct rtw_rsvd_page *rsvd_pkt, *tmp;
unsigned long flags;
if (fw->firmware)
release_firmware(fw->firmware);
spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
skb_queue_purge(&rtwdev->tx_report.queue);
spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
list_del(&rsvd_pkt->list);
kfree(rsvd_pkt);
}
mutex_destroy(&rtwdev->mutex);
mutex_destroy(&rtwdev->hal.tx_power_mutex);
}
EXPORT_SYMBOL(rtw_core_deinit);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
int max_tx_headroom = 0;
int ret;
/* TODO: USB & SDIO may need extra room? */
max_tx_headroom = rtwdev->chip->tx_pkt_desc_sz;
hw->extra_tx_headroom = max_tx_headroom;
hw->queues = IEEE80211_NUM_ACS;
hw->sta_data_size = sizeof(struct rtw_sta_info);
hw->vif_data_size = sizeof(struct rtw_vif);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
rtw_regd_init(rtwdev, rtw_regd_notifier);
ret = ieee80211_register_hw(hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
return ret;
}
if (regulatory_hint(hw->wiphy, rtwdev->regd.alpha2))
rtw_err(rtwdev, "regulatory_hint fail\n");
rtw_debugfs_init(rtwdev);
return 0;
}
EXPORT_SYMBOL(rtw_register_hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
struct rtw_chip_info *chip = rtwdev->chip;
ieee80211_unregister_hw(hw);
rtw_unset_supported_band(hw, chip);
}
EXPORT_SYMBOL(rtw_unregister_hw);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless core module");
MODULE_LICENSE("Dual BSD/GPL");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTK_MAIN_H_
#define __RTK_MAIN_H_
#include <net/mac80211.h>
#include <linux/vmalloc.h>
#include <linux/firmware.h>
#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "util.h"
#define RTW_MAX_MAC_ID_NUM 32
#define RTW_MAX_SEC_CAM_NUM 32
#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
#define RFREG_MASK 0xfffff
#define INV_RF_DATA 0xffffffff
#define TX_PAGE_SIZE_SHIFT 7
#define RTW_CHANNEL_WIDTH_MAX 3
#define RTW_RF_PATH_MAX 4
#define HW_FEATURE_LEN 13
extern unsigned int rtw_debug_mask;
extern const struct ieee80211_ops rtw_ops;
extern struct rtw_chip_info rtw8822b_hw_spec;
extern struct rtw_chip_info rtw8822c_hw_spec;
#define RTW_MAX_CHANNEL_NUM_2G 14
#define RTW_MAX_CHANNEL_NUM_5G 49
struct rtw_dev;
enum rtw_hci_type {
RTW_HCI_TYPE_PCIE,
RTW_HCI_TYPE_USB,
RTW_HCI_TYPE_SDIO,
RTW_HCI_TYPE_UNDEFINE,
};
struct rtw_hci {
struct rtw_hci_ops *ops;
enum rtw_hci_type type;
u32 rpwm_addr;
u8 bulkout_num;
};
enum rtw_supported_band {
RTW_BAND_2G = 1 << 0,
RTW_BAND_5G = 1 << 1,
RTW_BAND_60G = 1 << 2,
RTW_BAND_MAX,
};
enum rtw_bandwidth {
RTW_CHANNEL_WIDTH_20 = 0,
RTW_CHANNEL_WIDTH_40 = 1,
RTW_CHANNEL_WIDTH_80 = 2,
RTW_CHANNEL_WIDTH_160 = 3,
RTW_CHANNEL_WIDTH_80_80 = 4,
RTW_CHANNEL_WIDTH_5 = 5,
RTW_CHANNEL_WIDTH_10 = 6,
};
enum rtw_net_type {
RTW_NET_NO_LINK = 0,
RTW_NET_AD_HOC = 1,
RTW_NET_MGD_LINKED = 2,
RTW_NET_AP_MODE = 3,
};
enum rtw_rf_type {
RF_1T1R = 0,
RF_1T2R = 1,
RF_2T2R = 2,
RF_2T3R = 3,
RF_2T4R = 4,
RF_3T3R = 5,
RF_3T4R = 6,
RF_4T4R = 7,
RF_TYPE_MAX,
};
enum rtw_rf_path {
RF_PATH_A = 0,
RF_PATH_B = 1,
RF_PATH_C = 2,
RF_PATH_D = 3,
};
enum rtw_bb_path {
BB_PATH_A = BIT(0),
BB_PATH_B = BIT(1),
BB_PATH_C = BIT(2),
BB_PATH_D = BIT(3),
BB_PATH_AB = (BB_PATH_A | BB_PATH_B),
BB_PATH_AC = (BB_PATH_A | BB_PATH_C),
BB_PATH_AD = (BB_PATH_A | BB_PATH_D),
BB_PATH_BC = (BB_PATH_B | BB_PATH_C),
BB_PATH_BD = (BB_PATH_B | BB_PATH_D),
BB_PATH_CD = (BB_PATH_C | BB_PATH_D),
BB_PATH_ABC = (BB_PATH_A | BB_PATH_B | BB_PATH_C),
BB_PATH_ABD = (BB_PATH_A | BB_PATH_B | BB_PATH_D),
BB_PATH_ACD = (BB_PATH_A | BB_PATH_C | BB_PATH_D),
BB_PATH_BCD = (BB_PATH_B | BB_PATH_C | BB_PATH_D),
BB_PATH_ABCD = (BB_PATH_A | BB_PATH_B | BB_PATH_C | BB_PATH_D),
};
enum rtw_rate_section {
RTW_RATE_SECTION_CCK = 0,
RTW_RATE_SECTION_OFDM,
RTW_RATE_SECTION_HT_1S,
RTW_RATE_SECTION_HT_2S,
RTW_RATE_SECTION_VHT_1S,
RTW_RATE_SECTION_VHT_2S,
/* keep last */
RTW_RATE_SECTION_MAX,
};
enum rtw_wireless_set {
WIRELESS_CCK = 0x00000001,
WIRELESS_OFDM = 0x00000002,
WIRELESS_HT = 0x00000004,
WIRELESS_VHT = 0x00000008,
};
#define HT_STBC_EN BIT(0)
#define VHT_STBC_EN BIT(1)
#define HT_LDPC_EN BIT(0)
#define VHT_LDPC_EN BIT(1)
enum rtw_chip_type {
RTW_CHIP_TYPE_8822B,
RTW_CHIP_TYPE_8822C,
};
enum rtw_tx_queue_type {
/* the order of AC queues matters */
RTW_TX_QUEUE_BK = 0x0,
RTW_TX_QUEUE_BE = 0x1,
RTW_TX_QUEUE_VI = 0x2,
RTW_TX_QUEUE_VO = 0x3,
RTW_TX_QUEUE_BCN = 0x4,
RTW_TX_QUEUE_MGMT = 0x5,
RTW_TX_QUEUE_HI0 = 0x6,
RTW_TX_QUEUE_H2C = 0x7,
/* keep it last */
RTK_MAX_TX_QUEUE_NUM
};
enum rtw_rx_queue_type {
RTW_RX_QUEUE_MPDU = 0x0,
RTW_RX_QUEUE_C2H = 0x1,
/* keep it last */
RTK_MAX_RX_QUEUE_NUM
};
enum rtw_rate_index {
RTW_RATEID_BGN_40M_2SS = 0,
RTW_RATEID_BGN_40M_1SS = 1,
RTW_RATEID_BGN_20M_2SS = 2,
RTW_RATEID_BGN_20M_1SS = 3,
RTW_RATEID_GN_N2SS = 4,
RTW_RATEID_GN_N1SS = 5,
RTW_RATEID_BG = 6,
RTW_RATEID_G = 7,
RTW_RATEID_B_20M = 8,
RTW_RATEID_ARFR0_AC_2SS = 9,
RTW_RATEID_ARFR1_AC_1SS = 10,
RTW_RATEID_ARFR2_AC_2G_1SS = 11,
RTW_RATEID_ARFR3_AC_2G_2SS = 12,
RTW_RATEID_ARFR4_AC_3SS = 13,
RTW_RATEID_ARFR5_N_3SS = 14,
RTW_RATEID_ARFR7_N_4SS = 15,
RTW_RATEID_ARFR6_AC_4SS = 16
};
enum rtw_trx_desc_rate {
DESC_RATE1M = 0x00,
DESC_RATE2M = 0x01,
DESC_RATE5_5M = 0x02,
DESC_RATE11M = 0x03,
DESC_RATE6M = 0x04,
DESC_RATE9M = 0x05,
DESC_RATE12M = 0x06,
DESC_RATE18M = 0x07,
DESC_RATE24M = 0x08,
DESC_RATE36M = 0x09,
DESC_RATE48M = 0x0a,
DESC_RATE54M = 0x0b,
DESC_RATEMCS0 = 0x0c,
DESC_RATEMCS1 = 0x0d,
DESC_RATEMCS2 = 0x0e,
DESC_RATEMCS3 = 0x0f,
DESC_RATEMCS4 = 0x10,
DESC_RATEMCS5 = 0x11,
DESC_RATEMCS6 = 0x12,
DESC_RATEMCS7 = 0x13,
DESC_RATEMCS8 = 0x14,
DESC_RATEMCS9 = 0x15,
DESC_RATEMCS10 = 0x16,
DESC_RATEMCS11 = 0x17,
DESC_RATEMCS12 = 0x18,
DESC_RATEMCS13 = 0x19,
DESC_RATEMCS14 = 0x1a,
DESC_RATEMCS15 = 0x1b,
DESC_RATEMCS16 = 0x1c,
DESC_RATEMCS17 = 0x1d,
DESC_RATEMCS18 = 0x1e,
DESC_RATEMCS19 = 0x1f,
DESC_RATEMCS20 = 0x20,
DESC_RATEMCS21 = 0x21,
DESC_RATEMCS22 = 0x22,
DESC_RATEMCS23 = 0x23,
DESC_RATEMCS24 = 0x24,
DESC_RATEMCS25 = 0x25,
DESC_RATEMCS26 = 0x26,
DESC_RATEMCS27 = 0x27,
DESC_RATEMCS28 = 0x28,
DESC_RATEMCS29 = 0x29,
DESC_RATEMCS30 = 0x2a,
DESC_RATEMCS31 = 0x2b,
DESC_RATEVHT1SS_MCS0 = 0x2c,
DESC_RATEVHT1SS_MCS1 = 0x2d,
DESC_RATEVHT1SS_MCS2 = 0x2e,
DESC_RATEVHT1SS_MCS3 = 0x2f,
DESC_RATEVHT1SS_MCS4 = 0x30,
DESC_RATEVHT1SS_MCS5 = 0x31,
DESC_RATEVHT1SS_MCS6 = 0x32,
DESC_RATEVHT1SS_MCS7 = 0x33,
DESC_RATEVHT1SS_MCS8 = 0x34,
DESC_RATEVHT1SS_MCS9 = 0x35,
DESC_RATEVHT2SS_MCS0 = 0x36,
DESC_RATEVHT2SS_MCS1 = 0x37,
DESC_RATEVHT2SS_MCS2 = 0x38,
DESC_RATEVHT2SS_MCS3 = 0x39,
DESC_RATEVHT2SS_MCS4 = 0x3a,
DESC_RATEVHT2SS_MCS5 = 0x3b,
DESC_RATEVHT2SS_MCS6 = 0x3c,
DESC_RATEVHT2SS_MCS7 = 0x3d,
DESC_RATEVHT2SS_MCS8 = 0x3e,
DESC_RATEVHT2SS_MCS9 = 0x3f,
DESC_RATEVHT3SS_MCS0 = 0x40,
DESC_RATEVHT3SS_MCS1 = 0x41,
DESC_RATEVHT3SS_MCS2 = 0x42,
DESC_RATEVHT3SS_MCS3 = 0x43,
DESC_RATEVHT3SS_MCS4 = 0x44,
DESC_RATEVHT3SS_MCS5 = 0x45,
DESC_RATEVHT3SS_MCS6 = 0x46,
DESC_RATEVHT3SS_MCS7 = 0x47,
DESC_RATEVHT3SS_MCS8 = 0x48,
DESC_RATEVHT3SS_MCS9 = 0x49,
DESC_RATEVHT4SS_MCS0 = 0x4a,
DESC_RATEVHT4SS_MCS1 = 0x4b,
DESC_RATEVHT4SS_MCS2 = 0x4c,
DESC_RATEVHT4SS_MCS3 = 0x4d,
DESC_RATEVHT4SS_MCS4 = 0x4e,
DESC_RATEVHT4SS_MCS5 = 0x4f,
DESC_RATEVHT4SS_MCS6 = 0x50,
DESC_RATEVHT4SS_MCS7 = 0x51,
DESC_RATEVHT4SS_MCS8 = 0x52,
DESC_RATEVHT4SS_MCS9 = 0x53,
DESC_RATE_MAX,
};
enum rtw_regulatory_domains {
RTW_REGD_FCC = 0,
RTW_REGD_MKK = 1,
RTW_REGD_ETSI = 2,
RTW_REGD_WW = 3,
RTW_REGD_MAX
};
enum rtw_flags {
RTW_FLAG_RUNNING,
RTW_FLAG_FW_RUNNING,
RTW_FLAG_SCANNING,
RTW_FLAG_INACTIVE_PS,
RTW_FLAG_LEISURE_PS,
RTW_FLAG_DIG_DISABLE,
NUM_OF_RTW_FLAGS,
};
/* tx power indexes are stored as differences from the cck-1s and ht40-1s
* base values, so the 1ss entries only need ht20 and ofdm diffs
*/
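/* Illustrative (made-up) numbers: with an ht40-1s base index of 0x28 and a
* bw20 diff of -2, the ht20-1s index would be 0x28 - 2 = 0x26; the real base
* and diff values come from the efuse / PHY PG tables.
*/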
struct rtw_2g_1s_pwr_idx_diff {
#ifdef __LITTLE_ENDIAN
s8 ofdm:4;
s8 bw20:4;
#else
s8 bw20:4;
s8 ofdm:4;
#endif
} __packed;
struct rtw_2g_ns_pwr_idx_diff {
#ifdef __LITTLE_ENDIAN
s8 bw20:4;
s8 bw40:4;
s8 cck:4;
s8 ofdm:4;
#else
s8 ofdm:4;
s8 cck:4;
s8 bw40:4;
s8 bw20:4;
#endif
} __packed;
struct rtw_2g_txpwr_idx {
u8 cck_base[6];
u8 bw40_base[5];
struct rtw_2g_1s_pwr_idx_diff ht_1s_diff;
struct rtw_2g_ns_pwr_idx_diff ht_2s_diff;
struct rtw_2g_ns_pwr_idx_diff ht_3s_diff;
struct rtw_2g_ns_pwr_idx_diff ht_4s_diff;
};
struct rtw_5g_ht_1s_pwr_idx_diff {
#ifdef __LITTLE_ENDIAN
s8 ofdm:4;
s8 bw20:4;
#else
s8 bw20:4;
s8 ofdm:4;
#endif
} __packed;
struct rtw_5g_ht_ns_pwr_idx_diff {
#ifdef __LITTLE_ENDIAN
s8 bw20:4;
s8 bw40:4;
#else
s8 bw40:4;
s8 bw20:4;
#endif
} __packed;
struct rtw_5g_ofdm_ns_pwr_idx_diff {
#ifdef __LITTLE_ENDIAN
s8 ofdm_3s:4;
s8 ofdm_2s:4;
s8 ofdm_4s:4;
s8 res:4;
#else
s8 res:4;
s8 ofdm_4s:4;
s8 ofdm_2s:4;
s8 ofdm_3s:4;
#endif
} __packed;
struct rtw_5g_vht_ns_pwr_idx_diff {
#ifdef __LITTLE_ENDIAN
s8 bw160:4;
s8 bw80:4;
#else
s8 bw80:4;
s8 bw160:4;
#endif
} __packed;
struct rtw_5g_txpwr_idx {
u8 bw40_base[14];
struct rtw_5g_ht_1s_pwr_idx_diff ht_1s_diff;
struct rtw_5g_ht_ns_pwr_idx_diff ht_2s_diff;
struct rtw_5g_ht_ns_pwr_idx_diff ht_3s_diff;
struct rtw_5g_ht_ns_pwr_idx_diff ht_4s_diff;
struct rtw_5g_ofdm_ns_pwr_idx_diff ofdm_diff;
struct rtw_5g_vht_ns_pwr_idx_diff vht_1s_diff;
struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff;
struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff;
struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff;
};
struct rtw_txpwr_idx {
struct rtw_2g_txpwr_idx pwr_idx_2g;
struct rtw_5g_txpwr_idx pwr_idx_5g;
};
struct rtw_timer_list {
struct timer_list timer;
void (*function)(void *data);
void *args;
};
struct rtw_channel_params {
u8 center_chan;
u8 bandwidth;
u8 primary_chan_idx;
};
struct rtw_hw_reg {
u32 addr;
u32 mask;
};
struct rtw_backup_info {
u8 len;
u32 reg;
u32 val;
};
enum rtw_vif_port_set {
PORT_SET_MAC_ADDR = BIT(0),
PORT_SET_BSSID = BIT(1),
PORT_SET_NET_TYPE = BIT(2),
PORT_SET_AID = BIT(3),
};
struct rtw_vif_port {
struct rtw_hw_reg mac_addr;
struct rtw_hw_reg bssid;
struct rtw_hw_reg net_type;
struct rtw_hw_reg aid;
};
struct rtw_tx_pkt_info {
u32 tx_pkt_size;
u8 offset;
u8 pkt_offset;
u8 mac_id;
u8 rate_id;
u8 rate;
u8 qsel;
u8 bw;
u8 sec_type;
u8 sn;
bool ampdu_en;
u8 ampdu_factor;
u8 ampdu_density;
u16 seq;
bool stbc;
bool ldpc;
bool dis_rate_fallback;
bool bmc;
bool use_rate;
bool ls;
bool fs;
bool short_gi;
bool report;
};
struct rtw_rx_pkt_stat {
bool phy_status;
bool icv_err;
bool crc_err;
bool decrypted;
bool is_c2h;
s32 signal_power;
u16 pkt_len;
u8 bw;
u8 drv_info_sz;
u8 shift;
u8 rate;
u8 mac_id;
u8 cam_id;
u8 ppdu_cnt;
u32 tsf_low;
s8 rx_power[RTW_RF_PATH_MAX];
u8 rssi;
u8 rxsc;
struct rtw_sta_info *si;
struct ieee80211_vif *vif;
};
struct rtw_traffic_stats {
/* units in bytes */
u64 tx_unicast;
u64 rx_unicast;
/* packet counters */
u64 tx_cnt;
u64 rx_cnt;
/* units in Mbps */
u32 tx_throughput;
u32 rx_throughput;
};
enum rtw_lps_mode {
RTW_MODE_ACTIVE = 0,
RTW_MODE_LPS = 1,
RTW_MODE_WMM_PS = 2,
};
enum rtw_pwr_state {
RTW_RF_OFF = 0x0,
RTW_RF_ON = 0x4,
RTW_ALL_ON = 0xc,
};
struct rtw_lps_conf {
/* the interface to enter lps */
struct rtw_vif *rtwvif;
enum rtw_lps_mode mode;
enum rtw_pwr_state state;
u8 awake_interval;
u8 rlbm;
u8 smart_ps;
u8 port_id;
};
enum rtw_hw_key_type {
RTW_CAM_NONE = 0,
RTW_CAM_WEP40 = 1,
RTW_CAM_TKIP = 2,
RTW_CAM_AES = 4,
RTW_CAM_WEP104 = 5,
};
struct rtw_cam_entry {
bool valid;
bool group;
u8 addr[ETH_ALEN];
u8 hw_key_type;
struct ieee80211_key_conf *key;
};
struct rtw_sec_desc {
/* search strategy */
bool default_key_search;
u32 total_cam_num;
struct rtw_cam_entry cam_table[RTW_MAX_SEC_CAM_NUM];
DECLARE_BITMAP(cam_map, RTW_MAX_SEC_CAM_NUM);
};
struct rtw_tx_report {
/* protect the tx report queue */
spinlock_t q_lock;
struct sk_buff_head queue;
atomic_t sn;
struct timer_list purge_timer;
};
#define RTW_BC_MC_MACID 1
DECLARE_EWMA(rssi, 10, 16);
struct rtw_sta_info {
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ewma_rssi avg_rssi;
u8 rssi_level;
u8 mac_id;
u8 rate_id;
enum rtw_bandwidth bw_mode;
enum rtw_rf_type rf_type;
enum rtw_wireless_set wireless_set;
u8 stbc_en:2;
u8 ldpc_en:2;
bool sgi_enable;
bool vht_enable;
bool updated;
u8 init_ra_lv;
u64 ra_mask;
};
struct rtw_vif {
struct ieee80211_vif *vif;
enum rtw_net_type net_type;
u16 aid;
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
u8 port;
const struct rtw_vif_port *conf;
struct rtw_traffic_stats stats;
bool in_lps;
};
struct rtw_regulatory {
char alpha2[2];
u8 chplan;
u8 txpwr_regd;
};
struct rtw_chip_ops {
int (*mac_init)(struct rtw_dev *rtwdev);
int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
void (*phy_set_param)(struct rtw_dev *rtwdev);
void (*set_channel)(struct rtw_dev *rtwdev, u8 channel,
u8 bandwidth, u8 primary_chan_idx);
void (*query_rx_desc)(struct rtw_dev *rtwdev, u8 *rx_desc,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_rx_status *rx_status);
u32 (*read_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask);
bool (*write_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void (*set_tx_power_index)(struct rtw_dev *rtwdev);
int (*rsvd_page_dump)(struct rtw_dev *rtwdev, u8 *buf, u32 offset,
u32 size);
void (*set_antenna)(struct rtw_dev *rtwdev, u8 antenna_tx,
u8 antenna_rx);
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
void (*do_iqk)(struct rtw_dev *rtwdev);
};
#define RTW_PWR_POLLING_CNT 20000
#define RTW_PWR_CMD_READ 0x00
#define RTW_PWR_CMD_WRITE 0x01
#define RTW_PWR_CMD_POLLING 0x02
#define RTW_PWR_CMD_DELAY 0x03
#define RTW_PWR_CMD_END 0x04
/* define the base address of each block */
#define RTW_PWR_ADDR_MAC 0x00
#define RTW_PWR_ADDR_USB 0x01
#define RTW_PWR_ADDR_PCIE 0x02
#define RTW_PWR_ADDR_SDIO 0x03
#define RTW_PWR_INTF_SDIO_MSK BIT(0)
#define RTW_PWR_INTF_USB_MSK BIT(1)
#define RTW_PWR_INTF_PCI_MSK BIT(2)
#define RTW_PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
#define RTW_PWR_CUT_A_MSK BIT(1)
#define RTW_PWR_CUT_B_MSK BIT(2)
#define RTW_PWR_CUT_C_MSK BIT(3)
#define RTW_PWR_CUT_D_MSK BIT(4)
#define RTW_PWR_CUT_E_MSK BIT(5)
#define RTW_PWR_CUT_F_MSK BIT(6)
#define RTW_PWR_CUT_G_MSK BIT(7)
#define RTW_PWR_CUT_ALL_MSK 0xFF
enum rtw_pwr_seq_cmd_delay_unit {
RTW_PWR_DELAY_US,
RTW_PWR_DELAY_MS,
};
struct rtw_pwr_seq_cmd {
u16 offset;
u8 cut_mask;
u8 intf_mask;
u8 base:4;
u8 cmd:4;
u8 mask;
u8 value;
};
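/* A power on/off sequence is, presumably, an array of these commands walked
* by the MAC power code: RTW_PWR_CMD_WRITE patches (value & mask) into the
* register at offset, RTW_PWR_CMD_POLLING waits for the masked value to
* match, RTW_PWR_CMD_DELAY sleeps, and RTW_PWR_CMD_END terminates the table.
* Entries whose cut_mask/intf_mask do not match the running chip are skipped.
*/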
enum rtw_chip_ver {
RTW_CHIP_VER_CUT_A = 0x00,
RTW_CHIP_VER_CUT_B = 0x01,
RTW_CHIP_VER_CUT_C = 0x02,
RTW_CHIP_VER_CUT_D = 0x03,
RTW_CHIP_VER_CUT_E = 0x04,
RTW_CHIP_VER_CUT_F = 0x05,
RTW_CHIP_VER_CUT_G = 0x06,
};
#define RTW_INTF_PHY_PLATFORM_ALL 0
enum rtw_intf_phy_cut {
RTW_INTF_PHY_CUT_A = BIT(0),
RTW_INTF_PHY_CUT_B = BIT(1),
RTW_INTF_PHY_CUT_C = BIT(2),
RTW_INTF_PHY_CUT_D = BIT(3),
RTW_INTF_PHY_CUT_E = BIT(4),
RTW_INTF_PHY_CUT_F = BIT(5),
RTW_INTF_PHY_CUT_G = BIT(6),
RTW_INTF_PHY_CUT_ALL = 0xFFFF,
};
enum rtw_ip_sel {
RTW_IP_SEL_PHY = 0,
RTW_IP_SEL_MAC = 1,
RTW_IP_SEL_DBI = 2,
RTW_IP_SEL_UNDEF = 0xFFFF
};
enum rtw_pq_map_id {
RTW_PQ_MAP_VO = 0x0,
RTW_PQ_MAP_VI = 0x1,
RTW_PQ_MAP_BE = 0x2,
RTW_PQ_MAP_BK = 0x3,
RTW_PQ_MAP_MG = 0x4,
RTW_PQ_MAP_HI = 0x5,
RTW_PQ_MAP_NUM = 0x6,
RTW_PQ_MAP_UNDEF,
};
enum rtw_dma_mapping {
RTW_DMA_MAPPING_EXTRA = 0,
RTW_DMA_MAPPING_LOW = 1,
RTW_DMA_MAPPING_NORMAL = 2,
RTW_DMA_MAPPING_HIGH = 3,
RTW_DMA_MAPPING_UNDEF,
};
struct rtw_rqpn {
enum rtw_dma_mapping dma_map_vo;
enum rtw_dma_mapping dma_map_vi;
enum rtw_dma_mapping dma_map_be;
enum rtw_dma_mapping dma_map_bk;
enum rtw_dma_mapping dma_map_mg;
enum rtw_dma_mapping dma_map_hi;
};
struct rtw_page_table {
u16 hq_num;
u16 nq_num;
u16 lq_num;
u16 exq_num;
u16 gapq_num;
};
struct rtw_intf_phy_para {
u16 offset;
u16 value;
u16 ip_sel;
u16 cut_mask;
u16 platform;
};
struct rtw_intf_phy_para_table {
struct rtw_intf_phy_para *usb2_para;
struct rtw_intf_phy_para *usb3_para;
struct rtw_intf_phy_para *gen1_para;
struct rtw_intf_phy_para *gen2_para;
u8 n_usb2_para;
u8 n_usb3_para;
u8 n_gen1_para;
u8 n_gen2_para;
};
struct rtw_table {
const void *data;
const u32 size;
void (*parse)(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
void (*do_cfg)(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
enum rtw_rf_path rf_path;
};
static inline void rtw_load_table(struct rtw_dev *rtwdev,
const struct rtw_table *tbl)
{
(*tbl->parse)(rtwdev, tbl);
}
enum rtw_rfe_fem {
RTW_RFE_IFEM,
RTW_RFE_EFEM,
RTW_RFE_IFEM2G_EFEM5G,
RTW_RFE_NUM,
};
struct rtw_rfe_def {
const struct rtw_table *phy_pg_tbl;
const struct rtw_table *txpwr_lmt_tbl;
};
#define RTW_DEF_RFE(chip, bb_pg, pwrlmt) { \
.phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \
.txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \
}
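/* Token-pasting example (hypothetical type numbers): RTW_DEF_RFE(8822b, 2, 5)
* expands to { .phy_pg_tbl = &rtw8822b_bb_pg_type2_tbl,
* .txpwr_lmt_tbl = &rtw8822b_txpwr_lmt_type5_tbl }.
*/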
/* hardware configuration for each IC */
struct rtw_chip_info {
struct rtw_chip_ops *ops;
u8 id;
const char *fw_name;
u8 tx_pkt_desc_sz;
u8 tx_buf_desc_sz;
u8 rx_pkt_desc_sz;
u8 rx_buf_desc_sz;
u32 phy_efuse_size;
u32 log_efuse_size;
u32 ptct_efuse_size;
u32 txff_size;
u32 rxff_size;
u8 band;
u8 page_size;
u8 csi_buf_pg_num;
u8 dig_max;
u8 dig_min;
u8 txgi_factor;
bool is_pwr_by_rate_dec;
u8 max_power_index;
bool ht_supported;
bool vht_supported;
/* init values */
u8 sys_func_en;
struct rtw_pwr_seq_cmd **pwr_on_seq;
struct rtw_pwr_seq_cmd **pwr_off_seq;
struct rtw_rqpn *rqpn_table;
struct rtw_page_table *page_table;
struct rtw_intf_phy_para_table *intf_table;
struct rtw_hw_reg *dig;
u32 rf_base_addr[2];
u32 rf_sipi_addr[2];
const struct rtw_table *mac_tbl;
const struct rtw_table *agc_tbl;
const struct rtw_table *bb_tbl;
const struct rtw_table *rf_tbl[RTW_RF_PATH_MAX];
const struct rtw_table *rfk_init_tbl;
const struct rtw_rfe_def *rfe_defs;
u32 rfe_defs_size;
};
struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 total_fa_cnt;
u8 min_rssi;
u8 pre_min_rssi;
u16 fa_history[4];
u8 igi_history[4];
u8 igi_bitmap;
bool damping;
u8 damping_cnt;
u8 damping_rssi;
u8 cck_gi_u_bnd;
u8 cck_gi_l_bnd;
};
struct rtw_efuse {
u32 size;
u32 physical_size;
u32 logical_size;
u32 protect_size;
u8 addr[ETH_ALEN];
u8 channel_plan;
u8 country_code[2];
u8 rfe_option;
u8 thermal_meter;
u8 crystal_cap;
u8 ant_div_cfg;
u8 ant_div_type;
u8 regd;
u8 lna_type_2g;
u8 lna_type_5g;
u8 glna_type;
u8 alna_type;
bool ext_lna_2g;
bool ext_lna_5g;
u8 pa_type_2g;
u8 pa_type_5g;
u8 gpa_type;
u8 apa_type;
bool ext_pa_2g;
bool ext_pa_5g;
bool btcoex;
/* bt shares antenna with wifi */
bool share_ant;
u8 bt_setting;
struct {
u8 hci;
u8 bw;
u8 ptcl;
u8 nss;
u8 ant_num;
} hw_cap;
struct rtw_txpwr_idx txpwr_idx_table[4];
};
struct rtw_phy_cond {
#ifdef __LITTLE_ENDIAN
u32 rfe:8;
u32 intf:4;
u32 pkg:4;
u32 plat:4;
u32 intf_rsvd:4;
u32 cut:4;
u32 branch:2;
u32 neg:1;
u32 pos:1;
#else
u32 pos:1;
u32 neg:1;
u32 branch:2;
u32 cut:4;
u32 intf_rsvd:4;
u32 plat:4;
u32 pkg:4;
u32 intf:4;
u32 rfe:8;
#endif
/* for intf:4 */
#define INTF_PCIE BIT(0)
#define INTF_USB BIT(1)
#define INTF_SDIO BIT(2)
/* for branch:2 */
#define BRANCH_IF 0
#define BRANCH_ELIF 1
#define BRANCH_ELSE 2
#define BRANCH_ENDIF 3
};
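/* These fields presumably mirror the condition words embedded in the PHY
* parameter tables: rtw_phy_setup_phy_cond() records the platform, interface,
* cut and rfe of the running chip, and the table parser compares each IF/ELIF
* condition (see BRANCH_*) against it to decide which register values apply.
*/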
struct rtw_fifo_conf {
/* tx fifo information */
u16 rsvd_boundary;
u16 rsvd_pg_num;
u16 rsvd_drv_pg_num;
u16 txff_pg_num;
u16 acq_pg_num;
u16 rsvd_drv_addr;
u16 rsvd_h2c_info_addr;
u16 rsvd_h2c_sta_info_addr;
u16 rsvd_h2cq_addr;
u16 rsvd_cpu_instr_addr;
u16 rsvd_fw_txbuf_addr;
u16 rsvd_csibuf_addr;
enum rtw_dma_mapping pq_map[RTW_PQ_MAP_NUM];
};
struct rtw_fw_state {
const struct firmware *firmware;
struct completion completion;
u16 version;
u8 sub_version;
u8 sub_index;
u16 h2c_version;
};
struct rtw_hal {
u32 rcr;
u32 chip_version;
u8 fab_version;
u8 cut_version;
u8 mp_chip;
u8 oem_id;
struct rtw_phy_cond phy_cond;
u8 ps_mode;
u8 current_channel;
u8 current_band_width;
u8 current_band_type;
u8 sec_ch_offset;
u8 rf_type;
u8 rf_path_num;
u8 antenna_tx;
u8 antenna_rx;
/* protect tx power section */
struct mutex tx_power_mutex;
s8 tx_pwr_by_rate_offset_2g[RTW_RF_PATH_MAX]
[DESC_RATE_MAX];
s8 tx_pwr_by_rate_offset_5g[RTW_RF_PATH_MAX]
[DESC_RATE_MAX];
s8 tx_pwr_by_rate_base_2g[RTW_RF_PATH_MAX]
[RTW_RATE_SECTION_MAX];
s8 tx_pwr_by_rate_base_5g[RTW_RF_PATH_MAX]
[RTW_RATE_SECTION_MAX];
s8 tx_pwr_limit_2g[RTW_REGD_MAX]
[RTW_CHANNEL_WIDTH_MAX]
[RTW_RATE_SECTION_MAX]
[RTW_MAX_CHANNEL_NUM_2G];
s8 tx_pwr_limit_5g[RTW_REGD_MAX]
[RTW_CHANNEL_WIDTH_MAX]
[RTW_RATE_SECTION_MAX]
[RTW_MAX_CHANNEL_NUM_5G];
s8 tx_pwr_tbl[RTW_RF_PATH_MAX]
[DESC_RATE_MAX];
};
struct rtw_dev {
struct ieee80211_hw *hw;
struct device *dev;
struct rtw_hci hci;
struct rtw_chip_info *chip;
struct rtw_hal hal;
struct rtw_fifo_conf fifo;
struct rtw_fw_state fw;
struct rtw_efuse efuse;
struct rtw_sec_desc sec;
struct rtw_traffic_stats stats;
struct rtw_regulatory regd;
struct rtw_dm_info dm_info;
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
/* lock for dm to use */
spinlock_t dm_lock;
/* read/write rf register */
spinlock_t rf_lock;
/* watch dog every 2 sec */
struct delayed_work watch_dog_work;
u32 watch_dog_cnt;
struct list_head rsvd_page_list;
/* c2h cmd queue & handler work */
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
struct rtw_tx_report tx_report;
struct {
/* indicate the mailbox to use with fw */
u8 last_box_num;
/* protect h2c commands sent to fw */
spinlock_t lock;
u32 seq;
} h2c;
/* lps power state & handler work */
struct rtw_lps_conf lps_conf;
struct delayed_work lps_work;
struct dentry *debugfs;
u8 sta_cnt;
DECLARE_BITMAP(mac_id_map, RTW_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
u8 mp_mode;
/* hci related data, must be last */
u8 priv[0] __aligned(sizeof(void *));
};
#include "hci.h"
static inline bool rtw_flag_check(struct rtw_dev *rtwdev, enum rtw_flags flag)
{
return test_bit(flag, rtwdev->flags);
}
static inline void rtw_flag_clear(struct rtw_dev *rtwdev, enum rtw_flags flag)
{
clear_bit(flag, rtwdev->flags);
}
static inline void rtw_flag_set(struct rtw_dev *rtwdev, enum rtw_flags flag)
{
set_bit(flag, rtwdev->flags);
}
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val);
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value);
void rtw_restore_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *bckp, u32 num);
void rtw_set_channel(struct rtw_dev *rtwdev);
void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
u32 config);
void rtw_tx_report_purge_timer(struct timer_list *t);
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
int rtw_core_start(struct rtw_dev *rtwdev);
void rtw_core_stop(struct rtw_dev *rtwdev);
int rtw_chip_info_setup(struct rtw_dev *rtwdev);
int rtw_core_init(struct rtw_dev *rtwdev);
void rtw_core_deinit(struct rtw_dev *rtwdev);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "debug.h"
static u32 rtw_pci_tx_queue_idx_addr[] = {
[RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
[RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
[RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
[RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ,
[RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ,
[RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q,
[RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
};
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
switch (queue) {
case RTW_TX_QUEUE_BCN:
return TX_DESC_QSEL_BEACON;
case RTW_TX_QUEUE_H2C:
return TX_DESC_QSEL_H2C;
case RTW_TX_QUEUE_MGMT:
return TX_DESC_QSEL_MGMT;
case RTW_TX_QUEUE_HI0:
return TX_DESC_QSEL_HIGH;
default:
return skb->priority;
}
}
static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
return readb(rtwpci->mmap + addr);
}
static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
return readw(rtwpci->mmap + addr);
}
static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
return readl(rtwpci->mmap + addr);
}
static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
writeb(val, rtwpci->mmap + addr);
}
static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
writew(val, rtwpci->mmap + addr);
}
static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
writel(val, rtwpci->mmap + addr);
}
static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
int offset = tx_ring->r.desc_size * idx;
return tx_ring->r.head + offset;
}
static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
struct rtw_pci_tx_ring *tx_ring)
{
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
struct rtw_pci_tx_data *tx_data;
struct sk_buff *skb, *tmp;
dma_addr_t dma;
u8 *head = tx_ring->r.head;
u32 len = tx_ring->r.len;
int ring_sz = len * tx_ring->r.desc_size;
/* free every skb remaining in the tx list */
skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
__skb_unlink(skb, &tx_ring->queue);
tx_data = rtw_pci_get_tx_data(skb);
dma = tx_data->dma;
pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
/* free the ring itself */
pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
tx_ring->r.head = NULL;
}
static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
struct rtw_pci_rx_ring *rx_ring)
{
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
struct sk_buff *skb;
dma_addr_t dma;
u8 *head = rx_ring->r.head;
int buf_sz = RTK_PCI_RX_BUF_SIZE;
int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
int i;
for (i = 0; i < rx_ring->r.len; i++) {
skb = rx_ring->buf[i];
if (!skb)
continue;
dma = *((dma_addr_t *)skb->cb);
pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
rx_ring->buf[i] = NULL;
}
pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}
static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *tx_ring;
struct rtw_pci_rx_ring *rx_ring;
int i;
for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
tx_ring = &rtwpci->tx_rings[i];
rtw_pci_free_tx_ring(rtwdev, tx_ring);
}
for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
rtw_pci_free_rx_ring(rtwdev, rx_ring);
}
}
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
struct rtw_pci_tx_ring *tx_ring,
u8 desc_size, u32 len)
{
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
int ring_sz = desc_size * len;
dma_addr_t dma;
u8 *head;
head = pci_zalloc_consistent(pdev, ring_sz, &dma);
if (!head) {
rtw_err(rtwdev, "failed to allocate tx ring\n");
return -ENOMEM;
}
skb_queue_head_init(&tx_ring->queue);
tx_ring->r.head = head;
tx_ring->r.dma = dma;
tx_ring->r.len = len;
tx_ring->r.desc_size = desc_size;
tx_ring->r.wp = 0;
tx_ring->r.rp = 0;
return 0;
}
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
struct rtw_pci_rx_ring *rx_ring,
u32 idx, u32 desc_sz)
{
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
struct rtw_pci_rx_buffer_desc *buf_desc;
int buf_sz = RTK_PCI_RX_BUF_SIZE;
dma_addr_t dma;
if (!skb)
return -EINVAL;
dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma))
return -EBUSY;
*((dma_addr_t *)skb->cb) = dma;
buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
idx * desc_sz);
memset(buf_desc, 0, sizeof(*buf_desc));
buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
buf_desc->dma = cpu_to_le32(dma);
return 0;
}
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
struct rtw_pci_rx_ring *rx_ring,
u8 desc_size, u32 len)
{
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
struct sk_buff *skb = NULL;
dma_addr_t dma;
u8 *head;
int ring_sz = desc_size * len;
int buf_sz = RTK_PCI_RX_BUF_SIZE;
int i, allocated;
int ret = 0;
head = pci_zalloc_consistent(pdev, ring_sz, &dma);
if (!head) {
rtw_err(rtwdev, "failed to allocate rx ring\n");
return -ENOMEM;
}
rx_ring->r.head = head;
for (i = 0; i < len; i++) {
skb = dev_alloc_skb(buf_sz);
if (!skb) {
allocated = i;
ret = -ENOMEM;
goto err_out;
}
memset(skb->data, 0, buf_sz);
rx_ring->buf[i] = skb;
ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
if (ret) {
allocated = i;
dev_kfree_skb_any(skb);
goto err_out;
}
}
rx_ring->r.dma = dma;
rx_ring->r.len = len;
rx_ring->r.desc_size = desc_size;
rx_ring->r.wp = 0;
rx_ring->r.rp = 0;
return 0;
err_out:
for (i = 0; i < allocated; i++) {
skb = rx_ring->buf[i];
if (!skb)
continue;
dma = *((dma_addr_t *)skb->cb);
pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
rx_ring->buf[i] = NULL;
}
pci_free_consistent(pdev, ring_sz, head, dma);
rtw_err(rtwdev, "failed to init rx buffer\n");
return ret;
}
static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *tx_ring;
struct rtw_pci_rx_ring *rx_ring;
struct rtw_chip_info *chip = rtwdev->chip;
int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
int tx_desc_size, rx_desc_size;
u32 len;
int ret;
tx_desc_size = chip->tx_buf_desc_sz;
for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
tx_ring = &rtwpci->tx_rings[i];
len = max_num_of_tx_queue(i);
ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
if (ret)
goto out;
}
rx_desc_size = chip->rx_buf_desc_sz;
for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
rx_ring = &rtwpci->rx_rings[j];
ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
RTK_MAX_RX_DESC_NUM);
if (ret)
goto out;
}
return 0;
out:
tx_alloced = i;
for (i = 0; i < tx_alloced; i++) {
tx_ring = &rtwpci->tx_rings[i];
rtw_pci_free_tx_ring(rtwdev, tx_ring);
}
rx_alloced = j;
for (j = 0; j < rx_alloced; j++) {
rx_ring = &rtwpci->rx_rings[j];
rtw_pci_free_rx_ring(rtwdev, rx_ring);
}
return ret;
}
static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
rtw_pci_free_trx_ring(rtwdev);
}
static int rtw_pci_init(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
int ret = 0;
rtwpci->irq_mask[0] = IMR_HIGHDOK |
IMR_MGNTDOK |
IMR_BKDOK |
IMR_BEDOK |
IMR_VIDOK |
IMR_VODOK |
IMR_ROK |
IMR_BCNDMAINT_E |
0;
rtwpci->irq_mask[1] = IMR_TXFOVW |
0;
rtwpci->irq_mask[3] = IMR_H2CDOK |
0;
spin_lock_init(&rtwpci->irq_lock);
ret = rtw_pci_init_trx_ring(rtwdev);
return ret;
}
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
u32 len;
u8 tmp;
dma_addr_t dma;
tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
/* reset read/write pointers */
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
/* reset H2C queue index */
rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
}
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
rtw_pci_reset_buf_desc(rtwdev);
}
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci)
{
rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
rtwpci->irq_enabled = true;
}
static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci)
{
rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
rtwpci->irq_enabled = false;
}
static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
rtw_pci_reset_trx_ring(rtwdev);
return 0;
}
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
/* reset dma and rx tag */
rtw_write32_set(rtwdev, RTK_PCI_CTRL,
BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
rtwpci->rx_tag = 0;
}
static int rtw_pci_start(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
unsigned long flags;
rtw_pci_dma_reset(rtwdev, rtwpci);
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtw_pci_enable_interrupt(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return 0;
}
static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
unsigned long flags;
spin_lock_irqsave(&rtwpci->irq_lock, flags);
rtw_pci_disable_interrupt(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}
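/* mac80211 hands us its AC index (0 = VO ... 3 = BK); map it to the
* corresponding hardware tx queue.
*/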
static u8 ac_to_hwq[] = {
[0] = RTW_TX_QUEUE_VO,
[1] = RTW_TX_QUEUE_VI,
[2] = RTW_TX_QUEUE_BE,
[3] = RTW_TX_QUEUE_BK,
};
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
u8 q_mapping = skb_get_queue_mapping(skb);
u8 queue;
if (unlikely(ieee80211_is_beacon(fc)))
queue = RTW_TX_QUEUE_BCN;
else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
queue = RTW_TX_QUEUE_MGMT;
else
queue = ac_to_hwq[q_mapping];
return queue;
}
static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
struct rtw_pci_tx_ring *ring)
{
struct sk_buff *prev = skb_dequeue(&ring->queue);
struct rtw_pci_tx_data *tx_data;
dma_addr_t dma;
if (!prev)
return;
tx_data = rtw_pci_get_tx_data(prev);
dma = tx_data->dma;
pci_unmap_single(rtwpci->pdev, dma, prev->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(prev);
}
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
struct rtw_pci_rx_ring *rx_ring,
u32 idx)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_rx_buffer_desc *buf_desc;
u32 desc_sz = chip->rx_buf_desc_sz;
u16 total_pkt_size;
buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
idx * desc_sz);
total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
/* rx tag mismatch, throw a warning */
if (total_pkt_size != rtwpci->rx_tag)
rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}
static int rtw_pci_xmit(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb, u8 queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_tx_ring *ring;
struct rtw_pci_tx_data *tx_data;
dma_addr_t dma;
u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
u32 size;
u32 psb_len;
u8 *pkt_desc;
struct rtw_pci_tx_buffer_desc *buf_desc;
u32 bd_idx;
ring = &rtwpci->tx_rings[queue];
size = skb->len;
if (queue == RTW_TX_QUEUE_BCN)
rtw_pci_release_rsvd_page(rtwpci, ring);
else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
return -ENOSPC;
pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
memset(pkt_desc, 0, tx_pkt_desc_sz);
pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
rtw_tx_fill_tx_desc(pkt_info, skb);
dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtwpci->pdev, dma))
return -EBUSY;
/* after this point the dma mapping is committed, there is no way back */
buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
memset(buf_desc, 0, tx_buf_desc_sz);
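/* psb_len below is apparently the total packet size in 128-byte blocks
* (a ceiling division), with the ownership bit also set for the beacon
* queue. The buffer descriptor then carries two segments: [0] points at the
* tx packet descriptor we just pushed, [1] at the payload right behind it.
*/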
psb_len = (skb->len - 1) / 128 + 1;
if (queue == RTW_TX_QUEUE_BCN)
psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
buf_desc[0].psb_len = cpu_to_le16(psb_len);
buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
buf_desc[0].dma = cpu_to_le32(dma);
buf_desc[1].buf_size = cpu_to_le16(size);
buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
tx_data = rtw_pci_get_tx_data(skb);
tx_data->dma = dma;
tx_data->sn = pkt_info->sn;
skb_queue_tail(&ring->queue, skb);
/* kick off tx queue */
if (queue != RTW_TX_QUEUE_BCN) {
if (++ring->r.wp >= ring->r.len)
ring->r.wp = 0;
bd_idx = rtw_pci_tx_queue_idx_addr[queue];
rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
} else {
u32 reg_bcn_work;
reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
}
return 0;
}
static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
u32 size)
{
struct sk_buff *skb;
struct rtw_tx_pkt_info pkt_info;
u32 tx_pkt_desc_sz;
u32 length;
tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
length = size + tx_pkt_desc_sz;
skb = dev_alloc_skb(length);
if (!skb)
return -ENOMEM;
skb_reserve(skb, tx_pkt_desc_sz);
memcpy((u8 *)skb_put(skb, size), buf, size);
memset(&pkt_info, 0, sizeof(pkt_info));
pkt_info.tx_pkt_size = size;
pkt_info.offset = tx_pkt_desc_sz;
return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}
static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
struct sk_buff *skb;
struct rtw_tx_pkt_info pkt_info;
u32 tx_pkt_desc_sz;
u32 length;
tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
length = size + tx_pkt_desc_sz;
skb = dev_alloc_skb(length);
if (!skb)
return -ENOMEM;
skb_reserve(skb, tx_pkt_desc_sz);
memcpy((u8 *)skb_put(skb, size), buf, size);
memset(&pkt_info, 0, sizeof(pkt_info));
pkt_info.tx_pkt_size = size;
return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}
static int rtw_pci_tx(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *ring;
u8 queue = rtw_hw_queue_mapping(skb);
int ret;
ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
if (ret)
return ret;
ring = &rtwpci->tx_rings[queue];
if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
ring->queue_stopped = true;
}
return 0;
}
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
u8 hw_queue)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct ieee80211_tx_info *info;
struct rtw_pci_tx_ring *ring;
struct rtw_pci_tx_data *tx_data;
struct sk_buff *skb;
u32 count;
u32 bd_idx_addr;
u32 bd_idx, cur_rp;
u16 q_map;
ring = &rtwpci->tx_rings[hw_queue];
bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
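/* The TXBD index register packs the host write index in bits [11:0] and the
* hardware read index in bits [27:16] (layout inferred from the 16-bit write
* in rtw_pci_xmit); the distance between them is how many descriptors the
* DMA engine has completed since we last looked.
*/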
bd_idx = rtw_read32(rtwdev, bd_idx_addr);
cur_rp = bd_idx >> 16;
cur_rp &= 0xfff;
if (cur_rp >= ring->r.rp)
count = cur_rp - ring->r.rp;
else
count = ring->r.len - (ring->r.rp - cur_rp);
while (count--) {
skb = skb_dequeue(&ring->queue);
tx_data = rtw_pci_get_tx_data(skb);
pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
PCI_DMA_TODEVICE);
/* h2c (host-to-card) command packets only need to be freed */
if (hw_queue == RTW_TX_QUEUE_H2C) {
dev_kfree_skb_irq(skb);
continue;
}
if (ring->queue_stopped &&
avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
q_map = skb_get_queue_mapping(skb);
ieee80211_wake_queue(hw, q_map);
ring->queue_stopped = false;
}
skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
info = IEEE80211_SKB_CB(skb);
/* enqueue to wait for tx report */
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
continue;
}
/* report ACK status for the rest so they are not counted as dropped */
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
else
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_info_clear_status(info);
ieee80211_tx_status_irqsafe(hw, skb);
}
ring->r.rp = cur_rp;
}
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
u8 hw_queue)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_rx_ring *ring;
struct rtw_rx_pkt_stat pkt_stat;
struct ieee80211_rx_status rx_status;
struct sk_buff *skb, *new;
u32 cur_wp, cur_rp, tmp;
u32 count;
u32 pkt_offset;
u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
u32 buf_desc_sz = chip->rx_buf_desc_sz;
u8 *rx_desc;
dma_addr_t dma;
ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
cur_wp = tmp >> 16;
cur_wp &= 0xfff;
if (cur_wp >= ring->r.wp)
count = cur_wp - ring->r.wp;
else
count = ring->r.len - (ring->r.wp - cur_wp);
cur_rp = ring->r.rp;
while (count--) {
rtw_pci_dma_check(rtwdev, ring, cur_rp);
skb = ring->buf[cur_rp];
dma = *((dma_addr_t *)skb->cb);
pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
PCI_DMA_FROMDEVICE);
rx_desc = skb->data;
chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
/* offset from rx_desc to payload */
pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
pkt_stat.shift;
if (pkt_stat.is_c2h) {
/* keep rx_desc, halmac needs it */
skb_put(skb, pkt_stat.pkt_len + pkt_offset);
/* pass offset for further operation */
*((u32 *)skb->cb) = pkt_offset;
skb_queue_tail(&rtwdev->c2h_queue, skb);
ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
} else {
/* remove rx_desc, maybe use skb_pull? */
skb_put(skb, pkt_stat.pkt_len);
skb_reserve(skb, pkt_offset);
/* alloc a smaller skb to hand to mac80211 */
new = dev_alloc_skb(pkt_stat.pkt_len);
if (!new) {
new = skb;
} else {
skb_put_data(new, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
/* TODO: merge into rx.c */
rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
memcpy(new->cb, &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(rtwdev->hw, new);
}
/* skb delivered to mac80211, alloc a new one in rx ring */
new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
if (WARN(!new, "rx routine starvation\n"))
return;
ring->buf[cur_rp] = new;
rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);
/* host reads the next element in the ring */
if (++cur_rp >= ring->r.len)
cur_rp = 0;
}
ring->r.rp = cur_rp;
ring->r.wp = cur_wp;
rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
struct rtw_pci *rtwpci, u32 *irq_status)
{
irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
irq_status[0] &= rtwpci->irq_mask[0];
irq_status[1] &= rtwpci->irq_mask[1];
irq_status[3] &= rtwpci->irq_mask[3];
rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
}
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
struct rtw_dev *rtwdev = dev;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
u32 irq_status[4];
spin_lock(&rtwpci->irq_lock);
if (!rtwpci->irq_enabled)
goto out;
rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
if (irq_status[0] & IMR_MGNTDOK)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
if (irq_status[0] & IMR_HIGHDOK)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
if (irq_status[0] & IMR_BEDOK)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
if (irq_status[0] & IMR_BKDOK)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
if (irq_status[0] & IMR_VODOK)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
if (irq_status[0] & IMR_VIDOK)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
if (irq_status[3] & IMR_H2CDOK)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
if (irq_status[0] & IMR_ROK)
rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
out:
spin_unlock(&rtwpci->irq_lock);
return IRQ_HANDLED;
}
static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
unsigned long len;
u8 bar_id = 2;
int ret;
ret = pci_request_regions(pdev, KBUILD_MODNAME);
if (ret) {
rtw_err(rtwdev, "failed to request pci regions\n");
return ret;
}
len = pci_resource_len(pdev, bar_id);
rtwpci->mmap = pci_iomap(pdev, bar_id, len);
if (!rtwpci->mmap) {
rtw_err(rtwdev, "failed to map pci memory\n");
return -ENOMEM;
}
return 0;
}
static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
if (rtwpci->mmap) {
pci_iounmap(pdev, rtwpci->mmap);
pci_release_regions(pdev);
}
}
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
u16 write_addr;
u16 remainder = addr & 0x3;
u8 flag;
u8 cnt = 20;
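/* The DBI address register seems to take the dword-aligned config offset in
* its low bits, with one of four byte-enable flags at bits [15:12] selecting
* which byte within that dword is written; the data byte goes into the
* matching lane of the 32-bit write-data register.
*/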
write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
while (flag && (cnt != 0)) {
udelay(10);
flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
cnt--;
}
WARN(flag, "DBI write fail\n");
}
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
u8 page;
u8 wflag;
u8 cnt;
rtw_write16(rtwdev, REG_MDIO_V1, data);
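/* The MDIO space appears to be addressed in 0x20-register pages: pages 0/1
* hold the PCIe gen1 parameters and pages 2/3 the gen2 ones, which matches
* how the gen1/gen2 tables call this helper.
*/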
page = addr < 0x20 ? 0 : 1;
page += g1 ? 0 : 2;
rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
cnt = 20;
while (wflag && (cnt != 0)) {
udelay(10);
wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
BIT_MDIO_WFLAG_V1);
cnt--;
}
WARN(wflag, "MDIO write fail\n");
}
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_intf_phy_para *para;
u16 cut;
u16 value;
u16 offset;
u16 ip_sel;
int i;
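/* Convert the numeric cut version (RTW_CHIP_VER_CUT_A = 0, B = 1, ...) into
* the bitmask form used by rtw_intf_phy_para.cut_mask (RTW_INTF_PHY_CUT_A =
* BIT(0), ...), so entries for other cuts can be skipped.
*/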
cut = BIT(0) << rtwdev->hal.cut_version;
for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
para = &chip->intf_table->gen1_para[i];
if (!(para->cut_mask & cut))
continue;
if (para->offset == 0xffff)
break;
offset = para->offset;
value = para->value;
ip_sel = para->ip_sel;
if (para->ip_sel == RTW_IP_SEL_PHY)
rtw_mdio_write(rtwdev, offset, value, true);
else
rtw_dbi_write8(rtwdev, offset, value);
}
for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
para = &chip->intf_table->gen2_para[i];
if (!(para->cut_mask & cut))
continue;
if (para->offset == 0xffff)
break;
offset = para->offset;
value = para->value;
ip_sel = para->ip_sel;
if (para->ip_sel == RTW_IP_SEL_PHY)
rtw_mdio_write(rtwdev, offset, value, false);
else
rtw_dbi_write8(rtwdev, offset, value);
}
}
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
int ret;
ret = pci_enable_device(pdev);
if (ret) {
rtw_err(rtwdev, "failed to enable pci device\n");
return ret;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, rtwdev->hw);
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
return 0;
}
static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
pci_clear_master(pdev);
pci_disable_device(pdev);
}
static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
struct rtw_pci *rtwpci;
int ret;
rtwpci = (struct rtw_pci *)rtwdev->priv;
rtwpci->pdev = pdev;
/* after this, the driver can access hw registers */
ret = rtw_pci_io_mapping(rtwdev, pdev);
if (ret) {
rtw_err(rtwdev, "failed to request pci io region\n");
goto err_out;
}
ret = rtw_pci_init(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to allocate pci resources\n");
goto err_io_unmap;
}
rtw_pci_phy_cfg(rtwdev);
return 0;
err_io_unmap:
rtw_pci_io_unmapping(rtwdev, pdev);
err_out:
return ret;
}
static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
rtw_pci_deinit(rtwdev);
rtw_pci_io_unmapping(rtwdev, pdev);
}
static struct rtw_hci_ops rtw_pci_ops = {
.tx = rtw_pci_tx,
.setup = rtw_pci_setup,
.start = rtw_pci_start,
.stop = rtw_pci_stop,
.read8 = rtw_pci_read8,
.read16 = rtw_pci_read16,
.read32 = rtw_pci_read32,
.write8 = rtw_pci_write8,
.write16 = rtw_pci_write16,
.write32 = rtw_pci_write32,
.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
.write_data_h2c = rtw_pci_write_data_h2c,
};
static int rtw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct ieee80211_hw *hw;
struct rtw_dev *rtwdev;
int drv_data_size;
int ret;
drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
if (!hw) {
dev_err(&pdev->dev, "failed to allocate hw\n");
return -ENOMEM;
}
rtwdev = hw->priv;
rtwdev->hw = hw;
rtwdev->dev = &pdev->dev;
rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
rtwdev->hci.ops = &rtw_pci_ops;
rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
ret = rtw_core_init(rtwdev);
if (ret)
goto err_release_hw;
rtw_dbg(rtwdev, RTW_DBG_PCI,
"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
pdev->vendor, pdev->device, pdev->revision);
ret = rtw_pci_claim(rtwdev, pdev);
if (ret) {
rtw_err(rtwdev, "failed to claim pci device\n");
goto err_deinit_core;
}
ret = rtw_pci_setup_resource(rtwdev, pdev);
if (ret) {
rtw_err(rtwdev, "failed to setup pci resources\n");
goto err_pci_declaim;
}
ret = rtw_chip_info_setup(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to setup chip information\n");
goto err_destroy_pci;
}
ret = rtw_register_hw(rtwdev, hw);
if (ret) {
rtw_err(rtwdev, "failed to register hw\n");
goto err_destroy_pci;
}
ret = request_irq(pdev->irq, &rtw_pci_interrupt_handler,
IRQF_SHARED, KBUILD_MODNAME, rtwdev);
if (ret) {
ieee80211_unregister_hw(hw);
goto err_destroy_pci;
}
return 0;
err_destroy_pci:
rtw_pci_destroy(rtwdev, pdev);
err_pci_declaim:
rtw_pci_declaim(rtwdev, pdev);
err_deinit_core:
rtw_core_deinit(rtwdev);
err_release_hw:
ieee80211_free_hw(hw);
return ret;
}
static void rtw_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw_dev *rtwdev;
struct rtw_pci *rtwpci;
if (!hw)
return;
rtwdev = hw->priv;
rtwpci = (struct rtw_pci *)rtwdev->priv;
rtw_unregister_hw(rtwdev, hw);
rtw_pci_disable_interrupt(rtwdev, rtwpci);
rtw_pci_destroy(rtwdev, pdev);
rtw_pci_declaim(rtwdev, pdev);
free_irq(rtwpci->pdev->irq, rtwdev);
rtw_core_deinit(rtwdev);
ieee80211_free_hw(hw);
}
static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
{},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
static struct pci_driver rtw_pci_driver = {
.name = "rtw_pci",
.id_table = rtw_pci_id_table,
.probe = rtw_pci_probe,
.remove = rtw_pci_remove,
};
module_pci_driver(rtw_pci_driver);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTK_PCI_H_
#define __RTK_PCI_H_
#define RTK_PCI_DEVICE(vend, dev, hw_config) \
PCI_DEVICE(vend, dev), \
.driver_data = (kernel_ulong_t)&(hw_config),
#define RTK_DEFAULT_TX_DESC_NUM 128
#define RTK_BEQ_TX_DESC_NUM 256
#define RTK_MAX_RX_DESC_NUM 512
/* 8K + rx desc size */
#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
#define RTK_PCI_CTRL 0x300
#define BIT_RST_TRXDMA_INTF BIT(20)
#define BIT_RX_TAG_EN BIT(15)
#define REG_DBI_WDATA_V1 0x03E8
#define REG_DBI_FLAG_V1 0x03F0
#define REG_MDIO_V1 0x03F4
#define REG_PCIE_MIX_CFG 0x03F8
#define BIT_MDIO_WFLAG_V1 BIT(5)
#define BIT_PCI_BCNQ_FLAG BIT(4)
#define RTK_PCI_TXBD_DESA_BCNQ 0x308
#define RTK_PCI_TXBD_DESA_H2CQ 0x1320
#define RTK_PCI_TXBD_DESA_MGMTQ 0x310
#define RTK_PCI_TXBD_DESA_BKQ 0x330
#define RTK_PCI_TXBD_DESA_BEQ 0x328
#define RTK_PCI_TXBD_DESA_VIQ 0x320
#define RTK_PCI_TXBD_DESA_VOQ 0x318
#define RTK_PCI_TXBD_DESA_HI0Q 0x340
#define RTK_PCI_RXBD_DESA_MPDUQ 0x338
/* BCNQ is dedicated to rsvd pages, so no BD number needs to be programmed for it */
#define RTK_PCI_TXBD_NUM_H2CQ 0x1328
#define RTK_PCI_TXBD_NUM_MGMTQ 0x380
#define RTK_PCI_TXBD_NUM_BKQ 0x38A
#define RTK_PCI_TXBD_NUM_BEQ 0x388
#define RTK_PCI_TXBD_NUM_VIQ 0x386
#define RTK_PCI_TXBD_NUM_VOQ 0x384
#define RTK_PCI_TXBD_NUM_HI0Q 0x38C
#define RTK_PCI_RXBD_NUM_MPDUQ 0x382
#define RTK_PCI_TXBD_IDX_H2CQ 0x132C
#define RTK_PCI_TXBD_IDX_MGMTQ 0x3B0
#define RTK_PCI_TXBD_IDX_BKQ 0x3AC
#define RTK_PCI_TXBD_IDX_BEQ 0x3A8
#define RTK_PCI_TXBD_IDX_VIQ 0x3A4
#define RTK_PCI_TXBD_IDX_VOQ 0x3A0
#define RTK_PCI_TXBD_IDX_HI0Q 0x3B8
#define RTK_PCI_RXBD_IDX_MPDUQ 0x3B4
#define RTK_PCI_TXBD_RWPTR_CLR 0x39C
#define RTK_PCI_TXBD_H2CQ_CSR 0x1330
#define BIT_CLR_H2CQ_HOST_IDX BIT(16)
#define BIT_CLR_H2CQ_HW_IDX BIT(8)
#define RTK_PCI_HIMR0 0x0B0
#define RTK_PCI_HISR0 0x0B4
#define RTK_PCI_HIMR1 0x0B8
#define RTK_PCI_HISR1 0x0BC
#define RTK_PCI_HIMR2 0x10B0
#define RTK_PCI_HISR2 0x10B4
#define RTK_PCI_HIMR3 0x10B8
#define RTK_PCI_HISR3 0x10BC
/* IMR 0 */
#define IMR_TIMER2 BIT(31)
#define IMR_TIMER1 BIT(30)
#define IMR_PSTIMEOUT BIT(29)
#define IMR_GTINT4 BIT(28)
#define IMR_GTINT3 BIT(27)
#define IMR_TBDER BIT(26)
#define IMR_TBDOK BIT(25)
#define IMR_TSF_BIT32_TOGGLE BIT(24)
#define IMR_BCNDMAINT0 BIT(20)
#define IMR_BCNDOK0 BIT(16)
#define IMR_HSISR_IND_ON_INT BIT(15)
#define IMR_BCNDMAINT_E BIT(14)
#define IMR_ATIMEND BIT(12)
#define IMR_HISR1_IND_INT BIT(11)
#define IMR_C2HCMD BIT(10)
#define IMR_CPWM2 BIT(9)
#define IMR_CPWM BIT(8)
#define IMR_HIGHDOK BIT(7)
#define IMR_MGNTDOK BIT(6)
#define IMR_BKDOK BIT(5)
#define IMR_BEDOK BIT(4)
#define IMR_VIDOK BIT(3)
#define IMR_VODOK BIT(2)
#define IMR_RDU BIT(1)
#define IMR_ROK BIT(0)
/* IMR 1 */
#define IMR_TXFIFO_TH_INT BIT(30)
#define IMR_BTON_STS_UPDATE BIT(29)
#define IMR_MCUERR BIT(28)
#define IMR_BCNDMAINT7 BIT(27)
#define IMR_BCNDMAINT6 BIT(26)
#define IMR_BCNDMAINT5 BIT(25)
#define IMR_BCNDMAINT4 BIT(24)
#define IMR_BCNDMAINT3 BIT(23)
#define IMR_BCNDMAINT2 BIT(22)
#define IMR_BCNDMAINT1 BIT(21)
#define IMR_BCNDOK7 BIT(20)
#define IMR_BCNDOK6 BIT(19)
#define IMR_BCNDOK5 BIT(18)
#define IMR_BCNDOK4 BIT(17)
#define IMR_BCNDOK3 BIT(16)
#define IMR_BCNDOK2 BIT(15)
#define IMR_BCNDOK1 BIT(14)
#define IMR_ATIMEND_E BIT(13)
#define IMR_ATIMEND BIT(12)
#define IMR_TXERR BIT(11)
#define IMR_RXERR BIT(10)
#define IMR_TXFOVW BIT(9)
#define IMR_RXFOVW BIT(8)
#define IMR_CPU_MGQ_TXDONE BIT(5)
#define IMR_PS_TIMER_C BIT(4)
#define IMR_PS_TIMER_B BIT(3)
#define IMR_PS_TIMER_A BIT(2)
#define IMR_CPUMGQ_TX_TIMER BIT(1)
/* IMR 3 */
#define IMR_H2CDOK BIT(16)
/* one element is kept unused so a full ring can be told apart from an empty one */
static inline int avail_desc(u32 wp, u32 rp, u32 len)
{
if (rp > wp)
return rp - wp - 1;
else
return len - wp + rp - 1;
}
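/* Worked example (illustrative): with len = 128, wp = 10, rp = 5 the function
 * returns 128 - 10 + 5 - 1 = 122 free descriptors; with wp = 5, rp = 10 it
 * returns 10 - 5 - 1 = 4. The "- 1" is the reserved element mentioned above,
 * so wp == rp always means the ring is empty rather than full.
 */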
#define RTK_PCI_TXBD_OWN_OFFSET 15
#define RTK_PCI_TXBD_BCN_WORK 0x383
struct rtw_pci_tx_buffer_desc {
__le16 buf_size;
__le16 psb_len;
__le32 dma;
};
struct rtw_pci_tx_data {
dma_addr_t dma;
u8 sn;
};
struct rtw_pci_ring {
u8 *head;
dma_addr_t dma;
u8 desc_size;
u32 len;
u32 wp;
u32 rp;
};
struct rtw_pci_tx_ring {
struct rtw_pci_ring r;
struct sk_buff_head queue;
bool queue_stopped;
};
struct rtw_pci_rx_buffer_desc {
__le16 buf_size;
__le16 total_pkt_size;
__le32 dma;
};
struct rtw_pci_rx_ring {
struct rtw_pci_ring r;
struct sk_buff *buf[RTK_MAX_RX_DESC_NUM];
};
#define RX_TAG_MAX 8192
struct rtw_pci {
struct pci_dev *pdev;
/* used for pci interrupt */
spinlock_t irq_lock;
u32 irq_mask[4];
bool irq_enabled;
u16 rx_tag;
struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
void __iomem *mmap;
};
static u32 max_num_of_tx_queue(u8 queue)
{
u32 max_num;
switch (queue) {
case RTW_TX_QUEUE_BE:
max_num = RTK_BEQ_TX_DESC_NUM;
break;
case RTW_TX_QUEUE_BCN:
max_num = 1;
break;
default:
max_num = RTK_DEFAULT_TX_DESC_NUM;
break;
}
return max_num;
}
static inline
struct rtw_pci_tx_data *rtw_pci_get_tx_data(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct rtw_pci_tx_data) >
sizeof(info->status.status_driver_data));
return (struct rtw_pci_tx_data *)info->status.status_driver_data;
}
static inline
struct rtw_pci_tx_buffer_desc *get_tx_buffer_desc(struct rtw_pci_tx_ring *ring,
u32 size)
{
u8 *buf_desc;
buf_desc = ring->r.head + ring->r.wp * size;
return (struct rtw_pci_tx_buffer_desc *)buf_desc;
}
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include <linux/bcd.h>
#include "main.h"
#include "reg.h"
#include "fw.h"
#include "phy.h"
#include "debug.h"
struct phy_cfg_pair {
u32 addr;
u32 data;
};
union phy_table_tile {
struct rtw_phy_cond cond;
struct phy_cfg_pair cfg;
};
struct phy_pg_cfg_pair {
u32 band;
u32 rf_path;
u32 tx_num;
u32 addr;
u32 bitmask;
u32 data;
};
struct txpwr_lmt_cfg_pair {
u8 regd;
u8 band;
u8 bw;
u8 rs;
u8 ch;
s8 txpwr_lmt;
};
static const u32 db_invert_table[12][8] = {
{10, 13, 16, 20,
25, 32, 40, 50},
{64, 80, 101, 128,
160, 201, 256, 318},
{401, 505, 635, 800,
1007, 1268, 1596, 2010},
{316, 398, 501, 631,
794, 1000, 1259, 1585},
{1995, 2512, 3162, 3981,
5012, 6310, 7943, 10000},
{12589, 15849, 19953, 25119,
31623, 39811, 50119, 63098},
{79433, 100000, 125893, 158489,
199526, 251189, 316228, 398107},
{501187, 630957, 794328, 1000000,
1258925, 1584893, 1995262, 2511886},
{3162278, 3981072, 5011872, 6309573,
7943282, 10000000, 12589254, 15848932},
{19952623, 25118864, 31622777, 39810717,
50118723, 63095734, 79432823, 100000000},
{125892541, 158489319, 199526232, 251188643,
316227766, 398107171, 501187234, 630957345},
{794328235, 1000000000, 1258925412, 1584893192,
1995262315, 2511886432U, 3162277660U, 3981071706U}
};
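/* Note on the table layout (an observation, not from the original sources):
 * entry [i][j] approximates 10^((i * 8 + j + 1) / 10), i.e. the linear value
 * for (i * 8 + j + 1) dB. The first three rows are stored pre-multiplied by
 * 2^FRAC_BITS (8) to keep integer precision, which is why rtw_phy_db_2_linear()
 * below only shifts rows with i > 2 by FRAC_BITS.
 */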
enum rtw_phy_band_type {
PHY_BAND_2G = 0,
PHY_BAND_5G = 1,
};
void rtw_phy_init(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 addr, mask;
dm_info->fa_history[3] = 0;
dm_info->fa_history[2] = 0;
dm_info->fa_history[1] = 0;
dm_info->fa_history[0] = 0;
dm_info->igi_bitmap = 0;
dm_info->igi_history[3] = 0;
dm_info->igi_history[2] = 0;
dm_info->igi_history[1] = 0;
addr = chip->dig[0].addr;
mask = chip->dig[0].mask;
dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
}
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
u32 addr, mask;
u8 path;
for (path = 0; path < hal->rf_path_num; path++) {
addr = chip->dig[path].addr;
mask = chip->dig[path].mask;
rtw_write32_mask(rtwdev, addr, mask, igi);
}
}
static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->false_alarm_statistics(rtwdev);
}
#define RA_FLOOR_TABLE_SIZE 7
#define RA_FLOOR_UP_GAP 3
static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
{
u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
u8 new_level = 0;
int i;
for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
if (i >= old_level)
table[i] += RA_FLOOR_UP_GAP;
for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
if (rssi < table[i]) {
new_level = i;
break;
}
}
return new_level;
}
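/* Illustrative example: with old_level = 2 the thresholds at index >= 2 are
 * raised by RA_FLOOR_UP_GAP to {20, 34, 41, 45, 49, 53, 103}, so an RSSI of
 * 39 still maps to level 2 instead of crossing into level 3 at the unmodified
 * boundary of 38; this hysteresis keeps the level from flapping when the RSSI
 * hovers around a threshold.
 */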
struct rtw_phy_stat_iter_data {
struct rtw_dev *rtwdev;
u8 min_rssi;
};
static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw_phy_stat_iter_data *iter_data = data;
struct rtw_dev *rtwdev = iter_data->rtwdev;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
u8 rssi;
rssi = ewma_rssi_read(&si->avg_rssi);
si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);
rtw_fw_send_rssi_info(rtwdev, si);
iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
}
static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct rtw_phy_stat_iter_data data = {};
data.rtwdev = rtwdev;
data.min_rssi = U8_MAX;
rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);
dm_info->pre_min_rssi = dm_info->min_rssi;
dm_info->min_rssi = data.min_rssi;
}
static void rtw_phy_statistics(struct rtw_dev *rtwdev)
{
rtw_phy_stat_rssi(rtwdev);
rtw_phy_stat_false_alarm(rtwdev);
}
#define DIG_PERF_FA_TH_LOW 250
#define DIG_PERF_FA_TH_HIGH 500
#define DIG_PERF_FA_TH_EXTRA_HIGH 750
#define DIG_PERF_MAX 0x5a
#define DIG_PERF_MID 0x40
#define DIG_CVRG_FA_TH_LOW 2000
#define DIG_CVRG_FA_TH_HIGH 4000
#define DIG_CVRG_FA_TH_EXTRA_HIGH 5000
#define DIG_CVRG_MAX 0x2a
#define DIG_CVRG_MID 0x26
#define DIG_CVRG_MIN 0x1c
#define DIG_RSSI_GAIN_OFFSET 15
static bool
rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
{
u16 fa_lo = DIG_PERF_FA_TH_LOW;
u16 fa_hi = DIG_PERF_FA_TH_HIGH;
u16 *fa_history;
u8 *igi_history;
u8 damping_rssi;
u8 min_rssi;
u8 diff;
u8 igi_bitmap;
bool damping = false;
min_rssi = dm_info->min_rssi;
if (dm_info->damping) {
damping_rssi = dm_info->damping_rssi;
diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
damping_rssi - min_rssi;
if (diff > 3 || dm_info->damping_cnt++ > 20) {
dm_info->damping = false;
return false;
}
return true;
}
igi_history = dm_info->igi_history;
fa_history = dm_info->fa_history;
igi_bitmap = dm_info->igi_bitmap & 0xf;
switch (igi_bitmap) {
case 5:
/* down -> up -> down -> up */
if (igi_history[0] > igi_history[1] &&
igi_history[2] > igi_history[3] &&
igi_history[0] - igi_history[1] >= 2 &&
igi_history[2] - igi_history[3] >= 2 &&
fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
fa_history[2] > fa_hi && fa_history[3] < fa_lo)
damping = true;
break;
case 9:
/* up -> down -> down -> up */
if (igi_history[0] > igi_history[1] &&
igi_history[3] > igi_history[2] &&
igi_history[0] - igi_history[1] >= 4 &&
igi_history[3] - igi_history[2] >= 2 &&
fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
fa_history[2] < fa_lo && fa_history[3] > fa_hi)
damping = true;
break;
default:
return false;
}
if (damping) {
dm_info->damping = true;
dm_info->damping_cnt = 0;
dm_info->damping_rssi = min_rssi;
}
return damping;
}
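/* Note on the bitmap encoding (an observation from rtw_phy_dig_recorder()
 * below): bit 0 of igi_bitmap is the most recent IGI move (1 = increased),
 * bit 1 the one before, and so on, so 0x5 (0101b) and 0x9 (1001b) match the
 * oscillation patterns named in the case comments above. Once damping is
 * detected, DIG is frozen until the minimum RSSI moves by more than 3 or the
 * check has run roughly 20 more times.
 */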
static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
u8 *upper, u8 *lower, bool linked)
{
u8 dig_max, dig_min, dig_mid;
u8 min_rssi;
if (linked) {
dig_max = DIG_PERF_MAX;
dig_mid = DIG_PERF_MID;
/* 22B=0x1c, 22C=0x20 */
dig_min = 0x1c;
min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
} else {
dig_max = DIG_CVRG_MAX;
dig_mid = DIG_CVRG_MID;
dig_min = DIG_CVRG_MIN;
min_rssi = dig_min;
}
/* DIG MAX should be bounded by minimum RSSI with offset +15 */
dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);
*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
}
static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
u16 *fa_th, u8 *step, bool linked)
{
u8 min_rssi, pre_min_rssi;
min_rssi = dm_info->min_rssi;
pre_min_rssi = dm_info->pre_min_rssi;
step[0] = 4;
step[1] = 3;
step[2] = 2;
if (linked) {
fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
fa_th[1] = DIG_PERF_FA_TH_HIGH;
fa_th[2] = DIG_PERF_FA_TH_LOW;
if (pre_min_rssi > min_rssi) {
step[0] = 6;
step[1] = 4;
step[2] = 2;
}
} else {
fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
fa_th[1] = DIG_CVRG_FA_TH_HIGH;
fa_th[2] = DIG_CVRG_FA_TH_LOW;
}
}
static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
{
u8 *igi_history;
u16 *fa_history;
u8 igi_bitmap;
bool up;
igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
igi_history = dm_info->igi_history;
fa_history = dm_info->fa_history;
up = igi > igi_history[0];
igi_bitmap |= up;
igi_history[3] = igi_history[2];
igi_history[2] = igi_history[1];
igi_history[1] = igi_history[0];
igi_history[0] = igi;
fa_history[3] = fa_history[2];
fa_history[2] = fa_history[1];
fa_history[1] = fa_history[0];
fa_history[0] = fa;
dm_info->igi_bitmap = igi_bitmap;
}
static void rtw_phy_dig(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 upper_bound, lower_bound;
u8 pre_igi, cur_igi;
u16 fa_th[3], fa_cnt;
u8 level;
u8 step[3];
bool linked;
if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
return;
if (rtw_phy_dig_check_damping(dm_info))
return;
linked = !!rtwdev->sta_cnt;
fa_cnt = dm_info->total_fa_cnt;
pre_igi = dm_info->igi_history[0];
rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);
/* test the false alarm count against the thresholds from the highest
* level down, and raise the IGI by the corresponding step size
*
* note that every step size carries a +2 offset, which is compensated
* for afterwards (so the IGI drops by 2 when no threshold is exceeded)
*/
cur_igi = pre_igi;
for (level = 0; level < 3; level++) {
if (fa_cnt > fa_th[level]) {
cur_igi += step[level];
break;
}
}
cur_igi -= 2;
/* calculate the upper/lower bounds from the minimum rssi among the
* peers connected with us, and make sure the igi value does not go
* beyond the hardware limits
*/
rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);
/* record the current igi value and false alarm statistics for later
* damping checks, together with the trend of igi values
*/
rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);
if (cur_igi != pre_igi)
rtw_phy_dig_write(rtwdev, cur_igi);
}
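/* Illustrative walk-through: when linked, fa_th = {750, 500, 250} and
 * step = {4, 3, 2} (or {6, 4, 2} if the minimum RSSI dropped); a false alarm
 * count of 600 exceeds fa_th[1], so the IGI is raised by step[1] = 3 and then
 * lowered by the fixed 2, a net change of +1, before being clamped to the
 * bounds derived from the minimum RSSI.
 */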
static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw_dev *rtwdev = data;
struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
rtw_update_sta_info(rtwdev, si);
}
static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
{
if (rtwdev->watch_dog_cnt & 0x3)
return;
rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
}
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
{
/* for further calculation */
rtw_phy_statistics(rtwdev);
rtw_phy_dig(rtwdev);
rtw_phy_ra_info_update(rtwdev);
}
#define FRAC_BITS 3
static u8 rtw_phy_power_2_db(s8 power)
{
if (power <= -100 || power >= 20)
return 0;
else if (power >= 0)
return 100;
else
return 100 + power;
}
static u64 rtw_phy_db_2_linear(u8 power_db)
{
u8 i, j;
u64 linear;
/* 1dB ~ 96dB */
i = (power_db - 1) >> 3;
j = (power_db - 1) - (i << 3);
linear = db_invert_table[i][j];
linear = i > 2 ? linear << FRAC_BITS : linear;
return linear;
}
static u8 rtw_phy_linear_2_db(u64 linear)
{
u8 i;
u8 j;
u32 dB;
if (linear >= db_invert_table[11][7])
return 96; /* maximum 96 dB */
for (i = 0; i < 12; i++) {
if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
break;
else if (i > 2 && linear <= db_invert_table[i][7])
break;
}
for (j = 0; j < 8; j++) {
if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
break;
else if (i > 2 && linear <= db_invert_table[i][j])
break;
}
if (j == 0 && i == 0)
goto end;
if (j == 0) {
if (i != 3) {
if (db_invert_table[i][0] - linear >
linear - db_invert_table[i - 1][7]) {
i = i - 1;
j = 7;
}
} else {
if (db_invert_table[3][0] - linear >
linear - db_invert_table[2][7]) {
i = 2;
j = 7;
}
}
} else {
if (db_invert_table[i][j] - linear >
linear - db_invert_table[i][j - 1]) {
j = j - 1;
}
}
end:
dB = (i << 3) + j + 1;
return dB;
}
u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
{
s8 power;
u8 power_db;
u64 linear;
u64 sum = 0;
u8 path;
for (path = 0; path < path_num; path++) {
power = rf_power[path];
power_db = rtw_phy_power_2_db(power);
linear = rtw_phy_db_2_linear(power_db);
sum += linear;
}
sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
switch (path_num) {
case 2:
sum >>= 1;
break;
case 3:
sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
break;
case 4:
sum >>= 2;
break;
default:
break;
}
return rtw_phy_linear_2_db(sum);
}
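/* Illustrative example: two paths both reporting -50 dBm map to power_db = 50
 * each, are converted to the linear domain, averaged, and converted back, so
 * rtw_phy_rf_power_2_rssi() returns 50; unequal paths yield a value between
 * the two, weighted toward the stronger one by the linear-domain average.
 */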
u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
{
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_chip_info *chip = rtwdev->chip;
const u32 *base_addr = chip->rf_base_addr;
u32 val, direct_addr;
if (rf_path >= hal->rf_path_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return INV_RF_DATA;
}
addr &= 0xff;
direct_addr = base_addr[rf_path] + (addr << 2);
mask &= RFREG_MASK;
val = rtw_read32_mask(rtwdev, direct_addr, mask);
return val;
}
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_chip_info *chip = rtwdev->chip;
u32 *sipi_addr = chip->rf_sipi_addr;
u32 data_and_addr;
u32 old_data = 0;
u32 shift;
if (rf_path >= hal->rf_path_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return false;
}
addr &= 0xff;
mask &= RFREG_MASK;
if (mask != RFREG_MASK) {
old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);
if (old_data == INV_RF_DATA) {
rtw_err(rtwdev, "Write fail, rf is disabled\n");
return false;
}
shift = __ffs(mask);
data = ((old_data) & (~mask)) | (data << shift);
}
data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);
udelay(13);
return true;
}
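/* Layout of the SIPI write word, as built above: the 8-bit RF register
 * address sits in bits 27:20 and the 20-bit register value in bits 19:0;
 * a read-modify-write is only needed when the caller's mask is narrower
 * than the full RFREG_MASK.
 */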
bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_chip_info *chip = rtwdev->chip;
const u32 *base_addr = chip->rf_base_addr;
u32 direct_addr;
if (rf_path >= hal->rf_path_num) {
rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
return false;
}
addr &= 0xff;
direct_addr = base_addr[rf_path] + (addr << 2);
mask &= RFREG_MASK;
rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
rtw_write32_mask(rtwdev, direct_addr, mask, data);
udelay(1);
rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
return true;
}
bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
if (addr != 0x00)
return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);
return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
}
void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
{
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_phy_cond cond = {0};
cond.cut = hal->cut_version ? hal->cut_version : 15;
cond.pkg = pkg ? pkg : 15;
cond.plat = 0x04;
cond.rfe = efuse->rfe_option;
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_USB:
cond.intf = INTF_USB;
break;
case RTW_HCI_TYPE_SDIO:
cond.intf = INTF_SDIO;
break;
case RTW_HCI_TYPE_PCIE:
default:
cond.intf = INTF_PCIE;
break;
}
hal->phy_cond = cond;
rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
}
static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
{
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_phy_cond drv_cond = hal->phy_cond;
if (cond.cut && cond.cut != drv_cond.cut)
return false;
if (cond.pkg && cond.pkg != drv_cond.pkg)
return false;
if (cond.intf && cond.intf != drv_cond.intf)
return false;
if (cond.rfe != drv_cond.rfe)
return false;
return true;
}
void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
const union phy_table_tile *p = tbl->data;
const union phy_table_tile *end = p + tbl->size / 2;
struct rtw_phy_cond pos_cond = {0};
bool is_matched = true, is_skipped = false;
BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));
for (; p < end; p++) {
if (p->cond.pos) {
switch (p->cond.branch) {
case BRANCH_ENDIF:
is_matched = true;
is_skipped = false;
break;
case BRANCH_ELSE:
is_matched = is_skipped ? false : true;
break;
case BRANCH_IF:
case BRANCH_ELIF:
default:
pos_cond = p->cond;
break;
}
} else if (p->cond.neg) {
if (!is_skipped) {
if (check_positive(rtwdev, pos_cond)) {
is_matched = true;
is_skipped = true;
} else {
is_matched = false;
is_skipped = false;
}
} else {
is_matched = false;
}
} else if (is_matched) {
(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
}
}
}
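/* Rough sketch of the table format (inferred from the parser above): the
 * table is a flat list of two-word tiles; a tile with cond.pos set opens or
 * continues an IF/ELIF/ELSE/ENDIF branch, the following tile with cond.neg
 * set marks where the saved condition is evaluated against the driver's
 * phy_cond, and plain {addr, data} tiles are written out only while the
 * current branch matches.
 */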
void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
const struct phy_pg_cfg_pair *p = tbl->data;
const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);
for (; p < end; p++) {
if (p->addr == 0xfe || p->addr == 0xffe) {
msleep(50);
continue;
}
phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
p->tx_num, p->addr, p->bitmask,
p->data);
}
}
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
const struct rtw_table *tbl)
{
const struct txpwr_lmt_cfg_pair *p = tbl->data;
const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6;
BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);
for (; p < end; p++) {
phy_set_tx_power_limit(rtwdev, p->regd, p->band,
p->bw, p->rs,
p->ch, p->txpwr_lmt);
}
}
void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data)
{
rtw_write8(rtwdev, addr, data);
}
void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data)
{
rtw_write32(rtwdev, addr, data);
}
void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data)
{
if (addr == 0xfe)
msleep(50);
else if (addr == 0xfd)
mdelay(5);
else if (addr == 0xfc)
mdelay(1);
else if (addr == 0xfb)
usleep_range(50, 60);
else if (addr == 0xfa)
udelay(5);
else if (addr == 0xf9)
udelay(1);
else
rtw_write32(rtwdev, addr, data);
}
void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data)
{
if (addr == 0xffe) {
msleep(50);
} else if (addr == 0xfe) {
usleep_range(100, 110);
} else {
rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
udelay(1);
}
}
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->rfk_init_tbl)
return;
rtw_load_table(rtwdev, chip->rfk_init_tbl);
}
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
u8 rf_path;
rtw_load_table(rtwdev, chip->mac_tbl);
rtw_load_table(rtwdev, chip->bb_tbl);
rtw_load_table(rtwdev, chip->agc_tbl);
rtw_load_rfk_table(rtwdev);
for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
const struct rtw_table *tbl;
tbl = chip->rf_tbl[rf_path];
rtw_load_table(rtwdev, tbl);
}
}
#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
#define RTW_MAX_POWER_INDEX 0x3F
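/* Illustrative example for bcd_to_dec_pwr_by_rate(): with val = 0x32302826
 * and i = 3 the selected byte is 0x32, and bcd2bin() (from linux/bcd.h,
 * included above) turns it into decimal 32; each byte of the register value
 * therefore encodes one BCD power index.
 */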
u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
u8 rtw_ofdm_rates[] = {
DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
DESC_RATE48M, DESC_RATE54M
};
u8 rtw_ht_1s_rates[] = {
DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
DESC_RATEMCS6, DESC_RATEMCS7
};
u8 rtw_ht_2s_rates[] = {
DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
DESC_RATEMCS14, DESC_RATEMCS15
};
u8 rtw_vht_1s_rates[] = {
DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
u8 rtw_vht_2s_rates[] = {
DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
rtw_cck_rates, rtw_ofdm_rates,
rtw_ht_1s_rates, rtw_ht_2s_rates,
rtw_vht_1s_rates, rtw_vht_2s_rates
};
u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
ARRAY_SIZE(rtw_cck_rates),
ARRAY_SIZE(rtw_ofdm_rates),
ARRAY_SIZE(rtw_ht_1s_rates),
ARRAY_SIZE(rtw_ht_2s_rates),
ARRAY_SIZE(rtw_vht_1s_rates),
ARRAY_SIZE(rtw_vht_2s_rates)
};
static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
36, 38, 40, 42, 44, 46, 48, /* Band 1 */
52, 54, 56, 58, 60, 62, 64, /* Band 2 */
100, 102, 104, 106, 108, 110, 112, /* Band 3 */
116, 118, 120, 122, 124, 126, 128, /* Band 3 */
132, 134, 136, 138, 140, 142, 144, /* Band 3 */
149, 151, 153, 155, 157, 159, 161, /* Band 4 */
165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
static int rtw_channel_to_idx(u8 band, u8 channel)
{
int ch_idx;
u8 n_channel;
if (band == PHY_BAND_2G) {
ch_idx = channel - 1;
n_channel = RTW_MAX_CHANNEL_NUM_2G;
} else if (band == PHY_BAND_5G) {
n_channel = RTW_MAX_CHANNEL_NUM_5G;
for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
if (rtw_channel_idx_5g[ch_idx] == channel)
break;
} else {
return -1;
}
if (ch_idx >= n_channel)
return -1;
return ch_idx;
}
static u8 rtw_get_channel_group(u8 channel)
{
switch (channel) {
default:
WARN_ON(1);
case 1:
case 2:
case 36:
case 38:
case 40:
case 42:
return 0;
case 3:
case 4:
case 5:
case 44:
case 46:
case 48:
case 50:
return 1;
case 6:
case 7:
case 8:
case 52:
case 54:
case 56:
case 58:
return 2;
case 9:
case 10:
case 11:
case 60:
case 62:
case 64:
return 3;
case 12:
case 13:
case 100:
case 102:
case 104:
case 106:
return 4;
case 14:
case 108:
case 110:
case 112:
case 114:
return 5;
case 116:
case 118:
case 120:
case 122:
return 6;
case 124:
case 126:
case 128:
case 130:
return 7;
case 132:
case 134:
case 136:
case 138:
return 8;
case 140:
case 142:
case 144:
return 9;
case 149:
case 151:
case 153:
case 155:
return 10;
case 157:
case 159:
case 161:
return 11;
case 165:
case 167:
case 169:
case 171:
return 12;
case 173:
case 175:
case 177:
return 13;
}
}
static u8 phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
struct rtw_2g_txpwr_idx *pwr_idx_2g,
enum rtw_bandwidth bandwidth,
u8 rate, u8 group)
{
struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
bool mcs_rate;
bool above_2ss;
u8 factor = chip->txgi_factor;
if (rate <= DESC_RATE11M)
tx_power = pwr_idx_2g->cck_base[group];
else
tx_power = pwr_idx_2g->bw40_base[group];
if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
(rate >= DESC_RATEVHT1SS_MCS0 &&
rate <= DESC_RATEVHT2SS_MCS9);
above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
(rate >= DESC_RATEVHT2SS_MCS0);
if (!mcs_rate)
return tx_power;
switch (bandwidth) {
default:
WARN_ON(1);
case RTW_CHANNEL_WIDTH_20:
tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
if (above_2ss)
tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
break;
case RTW_CHANNEL_WIDTH_40:
/* bw40 is the base power */
if (above_2ss)
tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
break;
}
return tx_power;
}
static u8 phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
struct rtw_5g_txpwr_idx *pwr_idx_5g,
enum rtw_bandwidth bandwidth,
u8 rate, u8 group)
{
struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
u8 upper, lower;
bool mcs_rate;
bool above_2ss;
u8 factor = chip->txgi_factor;
tx_power = pwr_idx_5g->bw40_base[group];
mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
(rate >= DESC_RATEVHT1SS_MCS0 &&
rate <= DESC_RATEVHT2SS_MCS9);
above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
(rate >= DESC_RATEVHT2SS_MCS0);
if (!mcs_rate) {
tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
return tx_power;
}
switch (bandwidth) {
default:
WARN_ON(1);
case RTW_CHANNEL_WIDTH_20:
tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
if (above_2ss)
tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
break;
case RTW_CHANNEL_WIDTH_40:
/* bw40 is the base power */
if (above_2ss)
tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
break;
case RTW_CHANNEL_WIDTH_80:
/* the base idx of bw80 is the average of bw40+/bw40- */
lower = pwr_idx_5g->bw40_base[group];
upper = pwr_idx_5g->bw40_base[group + 1];
tx_power = (lower + upper) / 2;
tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
if (above_2ss)
tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
break;
}
return tx_power;
}
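/* Both helpers above compose the power index the same way: start from the
 * per-group base (CCK or BW40), then add the per-rate-section differential
 * values from the efuse table, each scaled by the chip's txgi_factor; for
 * 80 MHz the base is the average of the two adjacent BW40 groups, as noted
 * in the switch above.
 */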
/* set tx power level by path for each rate; note that the order of the rates
* is *very* important, because 8822B/8821C combines every four bytes of tx
* power index into a four-byte power index register, and calls set_tx_agc to
* write these values into hardware
*/
static
void phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, u8 ch, u8 path)
{
struct rtw_hal *hal = &rtwdev->hal;
u8 rs;
/* do not need cck rates if we are not in 2.4G */
if (hal->current_band_type == RTW_BAND_2G)
rs = RTW_RATE_SECTION_CCK;
else
rs = RTW_RATE_SECTION_OFDM;
for (; rs < RTW_RATE_SECTION_MAX; rs++)
phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
u8 path;
mutex_lock(&hal->tx_power_mutex);
for (path = 0; path < hal->rf_path_num; path++)
phy_set_tx_power_level_by_path(rtwdev, channel, path);
chip->ops->set_tx_power_index(rtwdev);
mutex_unlock(&hal->tx_power_mutex);
}
s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
enum rtw_bandwidth bandwidth, u8 rf_path,
u8 rate, u8 channel, u8 regd);
static
u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
{
struct rtw_dev *rtwdev = adapter;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_txpwr_idx *pwr_idx;
u8 tx_power;
u8 group;
u8 band;
s8 offset, limit;
pwr_idx = &rtwdev->efuse.txpwr_idx_table[rf_path];
group = rtw_get_channel_group(channel);
/* base power index for 2.4G/5G */
if (channel <= 14) {
band = PHY_BAND_2G;
tx_power = phy_get_2g_tx_power_index(rtwdev,
&pwr_idx->pwr_idx_2g,
bandwidth, rate, group);
offset = hal->tx_pwr_by_rate_offset_2g[rf_path][rate];
} else {
band = PHY_BAND_5G;
tx_power = phy_get_5g_tx_power_index(rtwdev,
&pwr_idx->pwr_idx_5g,
bandwidth, rate, group);
offset = hal->tx_pwr_by_rate_offset_5g[rf_path][rate];
}
limit = phy_get_tx_power_limit(rtwdev, band, bandwidth, rf_path,
rate, channel, regd);
if (offset > limit)
offset = limit;
tx_power += offset;
if (tx_power > rtwdev->chip->max_power_index)
tx_power = rtwdev->chip->max_power_index;
return tx_power;
}
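/* Illustrative numbers: with a base index of 30 from the efuse table, a
 * by-rate offset of 4 and a regulatory limit of 2, the offset is capped at
 * the limit and the final index becomes 32, further clamped to the chip's
 * max_power_index if necessary.
 */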
void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
{
struct rtw_dev *rtwdev = adapter;
struct rtw_hal *hal = &rtwdev->hal;
u8 regd = rtwdev->regd.txpwr_regd;
u8 *rates;
u8 size;
u8 rate;
u8 pwr_idx;
u8 bw;
int i;
if (rs >= RTW_RATE_SECTION_MAX)
return;
rates = rtw_rate_section[rs];
size = rtw_rate_size[rs];
bw = hal->current_band_width;
for (i = 0; i < size; i++) {
rate = rates[i];
pwr_idx = phy_get_tx_power_index(adapter, path, rate, bw, ch,
regd);
hal->tx_pwr_tbl[path][rate] = pwr_idx;
}
}
static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
{
if (rtwdev->chip->is_pwr_by_rate_dec)
return bcd_to_dec_pwr_by_rate(hex, i);
else
return (hex >> (i * 8)) & 0xFF;
}
static void phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
u32 addr, u32 mask,
u32 val, u8 *rate,
u8 *pwr_by_rate, u8 *rate_num)
{
int i;
switch (addr) {
case 0xE00:
case 0x830:
rate[0] = DESC_RATE6M;
rate[1] = DESC_RATE9M;
rate[2] = DESC_RATE12M;
rate[3] = DESC_RATE18M;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xE04:
case 0x834:
rate[0] = DESC_RATE24M;
rate[1] = DESC_RATE36M;
rate[2] = DESC_RATE48M;
rate[3] = DESC_RATE54M;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xE08:
rate[0] = DESC_RATE1M;
pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
*rate_num = 1;
break;
case 0x86C:
if (mask == 0xffffff00) {
rate[0] = DESC_RATE2M;
rate[1] = DESC_RATE5_5M;
rate[2] = DESC_RATE11M;
for (i = 1; i < 4; ++i)
pwr_by_rate[i - 1] =
tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 3;
} else if (mask == 0x000000ff) {
rate[0] = DESC_RATE11M;
pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
*rate_num = 1;
}
break;
case 0xE10:
case 0x83C:
rate[0] = DESC_RATEMCS0;
rate[1] = DESC_RATEMCS1;
rate[2] = DESC_RATEMCS2;
rate[3] = DESC_RATEMCS3;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xE14:
case 0x848:
rate[0] = DESC_RATEMCS4;
rate[1] = DESC_RATEMCS5;
rate[2] = DESC_RATEMCS6;
rate[3] = DESC_RATEMCS7;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xE18:
case 0x84C:
rate[0] = DESC_RATEMCS8;
rate[1] = DESC_RATEMCS9;
rate[2] = DESC_RATEMCS10;
rate[3] = DESC_RATEMCS11;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xE1C:
case 0x868:
rate[0] = DESC_RATEMCS12;
rate[1] = DESC_RATEMCS13;
rate[2] = DESC_RATEMCS14;
rate[3] = DESC_RATEMCS15;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0x838:
rate[0] = DESC_RATE1M;
rate[1] = DESC_RATE2M;
rate[2] = DESC_RATE5_5M;
for (i = 1; i < 4; ++i)
pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
val, i);
*rate_num = 3;
break;
case 0xC20:
case 0xE20:
case 0x1820:
case 0x1A20:
rate[0] = DESC_RATE1M;
rate[1] = DESC_RATE2M;
rate[2] = DESC_RATE5_5M;
rate[3] = DESC_RATE11M;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC24:
case 0xE24:
case 0x1824:
case 0x1A24:
rate[0] = DESC_RATE6M;
rate[1] = DESC_RATE9M;
rate[2] = DESC_RATE12M;
rate[3] = DESC_RATE18M;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC28:
case 0xE28:
case 0x1828:
case 0x1A28:
rate[0] = DESC_RATE24M;
rate[1] = DESC_RATE36M;
rate[2] = DESC_RATE48M;
rate[3] = DESC_RATE54M;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC2C:
case 0xE2C:
case 0x182C:
case 0x1A2C:
rate[0] = DESC_RATEMCS0;
rate[1] = DESC_RATEMCS1;
rate[2] = DESC_RATEMCS2;
rate[3] = DESC_RATEMCS3;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC30:
case 0xE30:
case 0x1830:
case 0x1A30:
rate[0] = DESC_RATEMCS4;
rate[1] = DESC_RATEMCS5;
rate[2] = DESC_RATEMCS6;
rate[3] = DESC_RATEMCS7;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC34:
case 0xE34:
case 0x1834:
case 0x1A34:
rate[0] = DESC_RATEMCS8;
rate[1] = DESC_RATEMCS9;
rate[2] = DESC_RATEMCS10;
rate[3] = DESC_RATEMCS11;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC38:
case 0xE38:
case 0x1838:
case 0x1A38:
rate[0] = DESC_RATEMCS12;
rate[1] = DESC_RATEMCS13;
rate[2] = DESC_RATEMCS14;
rate[3] = DESC_RATEMCS15;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC3C:
case 0xE3C:
case 0x183C:
case 0x1A3C:
rate[0] = DESC_RATEVHT1SS_MCS0;
rate[1] = DESC_RATEVHT1SS_MCS1;
rate[2] = DESC_RATEVHT1SS_MCS2;
rate[3] = DESC_RATEVHT1SS_MCS3;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC40:
case 0xE40:
case 0x1840:
case 0x1A40:
rate[0] = DESC_RATEVHT1SS_MCS4;
rate[1] = DESC_RATEVHT1SS_MCS5;
rate[2] = DESC_RATEVHT1SS_MCS6;
rate[3] = DESC_RATEVHT1SS_MCS7;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC44:
case 0xE44:
case 0x1844:
case 0x1A44:
rate[0] = DESC_RATEVHT1SS_MCS8;
rate[1] = DESC_RATEVHT1SS_MCS9;
rate[2] = DESC_RATEVHT2SS_MCS0;
rate[3] = DESC_RATEVHT2SS_MCS1;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC48:
case 0xE48:
case 0x1848:
case 0x1A48:
rate[0] = DESC_RATEVHT2SS_MCS2;
rate[1] = DESC_RATEVHT2SS_MCS3;
rate[2] = DESC_RATEVHT2SS_MCS4;
rate[3] = DESC_RATEVHT2SS_MCS5;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xC4C:
case 0xE4C:
case 0x184C:
case 0x1A4C:
rate[0] = DESC_RATEVHT2SS_MCS6;
rate[1] = DESC_RATEVHT2SS_MCS7;
rate[2] = DESC_RATEVHT2SS_MCS8;
rate[3] = DESC_RATEVHT2SS_MCS9;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xCD8:
case 0xED8:
case 0x18D8:
case 0x1AD8:
rate[0] = DESC_RATEMCS16;
rate[1] = DESC_RATEMCS17;
rate[2] = DESC_RATEMCS18;
rate[3] = DESC_RATEMCS19;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xCDC:
case 0xEDC:
case 0x18DC:
case 0x1ADC:
rate[0] = DESC_RATEMCS20;
rate[1] = DESC_RATEMCS21;
rate[2] = DESC_RATEMCS22;
rate[3] = DESC_RATEMCS23;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xCE0:
case 0xEE0:
case 0x18E0:
case 0x1AE0:
rate[0] = DESC_RATEVHT3SS_MCS0;
rate[1] = DESC_RATEVHT3SS_MCS1;
rate[2] = DESC_RATEVHT3SS_MCS2;
rate[3] = DESC_RATEVHT3SS_MCS3;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xCE4:
case 0xEE4:
case 0x18E4:
case 0x1AE4:
rate[0] = DESC_RATEVHT3SS_MCS4;
rate[1] = DESC_RATEVHT3SS_MCS5;
rate[2] = DESC_RATEVHT3SS_MCS6;
rate[3] = DESC_RATEVHT3SS_MCS7;
for (i = 0; i < 4; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 4;
break;
case 0xCE8:
case 0xEE8:
case 0x18E8:
case 0x1AE8:
rate[0] = DESC_RATEVHT3SS_MCS8;
rate[1] = DESC_RATEVHT3SS_MCS9;
for (i = 0; i < 2; ++i)
pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
*rate_num = 2;
break;
default:
rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
break;
}
}
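/* Rough reading of the switch above: most of the listed BB registers pack
 * four one-byte tx power offsets, one per rate (a few carry fewer); e.g.
 * (illustrative values) a non-BCD chip seeing addr 0xC20 with val 0x08060402
 * would yield offsets 2/4/6/8 for the 1M/2M/5.5M/11M CCK rates.
 */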
void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
u32 regaddr, u32 bitmask, u32 data)
{
struct rtw_dev *rtwdev = adapter;
struct rtw_hal *hal = &rtwdev->hal;
u8 rate_num = 0;
u8 rate;
u8 rates[RTW_RF_PATH_MAX] = {0};
s8 offset;
s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
int i;
phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
rates, pwr_by_rate, &rate_num);
if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
(band != PHY_BAND_2G && band != PHY_BAND_5G) ||
rate_num > RTW_RF_PATH_MAX))
return;
for (i = 0; i < rate_num; i++) {
offset = pwr_by_rate[i];
rate = rates[i];
if (band == PHY_BAND_2G)
hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
else if (band == PHY_BAND_5G)
hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
else
continue;
}
}
static
void phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
u8 rs, u8 size, u8 *rates)
{
u8 rate;
u8 base_idx, rate_idx;
s8 base_2g, base_5g;
if (rs >= RTW_RATE_SECTION_VHT_1S)
base_idx = rates[size - 3];
else
base_idx = rates[size - 1];
base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
for (rate = 0; rate < size; rate++) {
rate_idx = rates[rate];
hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
}
}
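/* Observation (not from the original sources): for the VHT sections the base
 * rate is rates[size - 3], i.e. MCS7 of that section, presumably so HT and
 * VHT sections share the same reference rate; every other entry in the
 * section then becomes a signed delta against that base.
 */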
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
u8 path;
for (path = 0; path < RTW_RF_PATH_MAX; path++) {
phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_CCK,
rtw_cck_size, rtw_cck_rates);
phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_OFDM,
rtw_ofdm_size, rtw_ofdm_rates);
phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_HT_1S,
rtw_ht_1s_size, rtw_ht_1s_rates);
phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_HT_2S,
rtw_ht_2s_size, rtw_ht_2s_rates);
phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_VHT_1S,
rtw_vht_1s_size, rtw_vht_1s_rates);
phy_tx_power_by_rate_config_by_path(hal, path,
RTW_RATE_SECTION_VHT_2S,
rtw_vht_2s_size, rtw_vht_2s_rates);
}
}
static void
phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
s8 base;
u8 ch;
for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
base = hal->tx_pwr_by_rate_base_2g[0][rs];
hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
}
for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
base = hal->tx_pwr_by_rate_base_5g[0][rs];
hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
}
}
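/* After this pass the stored limits are no longer absolute indexes but
 * offsets relative to the per-rate-section base captured in
 * rtw_phy_tx_power_by_rate_config(), matching how phy_get_tx_power_index()
 * compares the by-rate offset against the limit.
 */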
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
{
u8 regd, bw, rs;
for (regd = 0; regd < RTW_REGD_MAX; regd++)
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
phy_tx_power_limit_config(hal, regd, bw, rs);
}
static s8 get_tx_power_limit(struct rtw_hal *hal, u8 bw, u8 rs, u8 ch, u8 regd)
{
if (regd > RTW_REGD_WW)
return RTW_MAX_POWER_INDEX;
return hal->tx_pwr_limit_2g[regd][bw][rs][ch];
}
s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
enum rtw_bandwidth bw, u8 rf_path,
u8 rate, u8 channel, u8 regd)
{
struct rtw_hal *hal = &rtwdev->hal;
s8 power_limit;
u8 rs;
int ch_idx;
if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
rs = RTW_RATE_SECTION_CCK;
else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
rs = RTW_RATE_SECTION_OFDM;
else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
rs = RTW_RATE_SECTION_HT_1S;
else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
rs = RTW_RATE_SECTION_HT_2S;
else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
rs = RTW_RATE_SECTION_VHT_1S;
else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
rs = RTW_RATE_SECTION_VHT_2S;
else
goto err;
ch_idx = rtw_channel_to_idx(band, channel);
if (ch_idx < 0)
goto err;
power_limit = get_tx_power_limit(hal, bw, rs, ch_idx, regd);
return power_limit;
err:
WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
band, bw, rf_path, rate, channel);
return RTW_MAX_POWER_INDEX;
}
void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
u8 bw, u8 rs, u8 ch, s8 pwr_limit)
{
struct rtw_hal *hal = &rtwdev->hal;
int ch_idx;
pwr_limit = clamp_t(s8, pwr_limit,
-RTW_MAX_POWER_INDEX, RTW_MAX_POWER_INDEX);
ch_idx = rtw_channel_to_idx(band, ch);
if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
WARN(1,
"wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
regd, band, bw, rs, ch_idx, pwr_limit);
return;
}
if (band == PHY_BAND_2G)
hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
else if (band == PHY_BAND_5G)
hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
}
static
void rtw_hw_tx_power_limit_init(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
{
u8 ch;
/* 2.4G channels */
for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
hal->tx_pwr_limit_2g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
/* 5G channels */
for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
hal->tx_pwr_limit_5g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
}
void rtw_hw_init_tx_power(struct rtw_hal *hal)
{
u8 regd, path, rate, rs, bw;
/* init tx power by rate offset */
for (path = 0; path < RTW_RF_PATH_MAX; path++) {
for (rate = 0; rate < DESC_RATE_MAX; rate++) {
hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
}
}
/* init tx power limit */
for (regd = 0; regd < RTW_REGD_MAX; regd++)
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
rtw_hw_tx_power_limit_init(hal, regd, bw, rs);
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_PHY_H_
#define __RTW_PHY_H_
#include "debug.h"
extern u8 rtw_cck_rates[];
extern u8 rtw_ofdm_rates[];
extern u8 rtw_ht_1s_rates[];
extern u8 rtw_ht_2s_rates[];
extern u8 rtw_vht_1s_rates[];
extern u8 rtw_vht_2s_rates[];
extern u8 *rtw_rate_section[];
extern u8 rtw_rate_size[];
void rtw_phy_init(struct rtw_dev *rtwdev);
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev);
u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num);
u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask);
bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
u32 regaddr, u32 bitmask, u32 data);
void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
u8 bw, u8 rs, u8 ch, s8 pwr_limit);
void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs);
void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg);
void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
u32 addr, u32 data);
void rtw_hw_init_tx_power(struct rtw_hal *hal);
void rtw_phy_load_tables(struct rtw_dev *rtwdev);
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal);
#define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \
const struct rtw_table name ## _tbl = { \
.data = name, \
.size = ARRAY_SIZE(name), \
.parse = rtw_parse_tbl_phy_cond, \
.do_cfg = cfg, \
.rf_path = path, \
}
#define RTW_DECL_TABLE_PHY_COND(name, cfg) \
RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, 0)
#define RTW_DECL_TABLE_RF_RADIO(name, path) \
RTW_DECL_TABLE_PHY_COND_CORE(name, rtw_phy_cfg_rf, RF_PATH_ ## path)
#define RTW_DECL_TABLE_BB_PG(name) \
const struct rtw_table name ## _tbl = { \
.data = name, \
.size = ARRAY_SIZE(name), \
.parse = rtw_parse_tbl_bb_pg, \
}
#define RTW_DECL_TABLE_TXPWR_LMT(name) \
const struct rtw_table name ## _tbl = { \
.data = name, \
.size = ARRAY_SIZE(name), \
.parse = rtw_parse_tbl_txpwr_lmt, \
}
static inline const struct rtw_rfe_def *rtw_get_rfe_def(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
const struct rtw_rfe_def *rfe_def = NULL;
if (chip->rfe_defs_size == 0)
return NULL;
if (efuse->rfe_option < chip->rfe_defs_size)
rfe_def = &chip->rfe_defs[efuse->rfe_option];
rtw_dbg(rtwdev, RTW_DBG_PHY, "use rfe_def[%d]\n", efuse->rfe_option);
return rfe_def;
}
static inline int rtw_check_supported_rfe(struct rtw_dev *rtwdev)
{
const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
if (!rfe_def || !rfe_def->phy_pg_tbl || !rfe_def->txpwr_lmt_tbl) {
rtw_err(rtwdev, "rfe %d isn't supported\n",
rtwdev->efuse.rfe_option);
return -ENODEV;
}
return 0;
}
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi);
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
#define MASKBYTE2 0xff0000
#define MASKBYTE3 0xff000000
#define MASKHWORD 0xffff0000
#define MASKLWORD 0x0000ffff
#define MASKDWORD 0xffffffff
#define RFREG_MASK 0xfffff
#define MASK7BITS 0x7f
#define MASK12BITS 0xfff
#define MASKH4BITS 0xf0000000
#define MASK20BITS 0xfffff
#define MASK24BITS 0xffffff
#define MASKH3BYTES 0xffffff00
#define MASKL3BYTES 0x00ffffff
#define MASKBYTE2HIGHNIBBLE 0x00f00000
#define MASKBYTE3LOWNIBBLE 0x0f000000
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "fw.h"
#include "ps.h"
#include "mac.h"
#include "debug.h"
static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
{
int ret;
ret = rtw_core_start(rtwdev);
if (ret)
rtw_err(rtwdev, "leave idle state failed\n");
rtw_set_channel(rtwdev);
rtw_flag_clear(rtwdev, RTW_FLAG_INACTIVE_PS);
return ret;
}
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS);
rtw_core_stop(rtwdev);
return 0;
}
static void rtw_restore_port_cfg_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct rtw_dev *rtwdev = data;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = ~0;
rtw_vif_port_config(rtwdev, rtwvif, config);
}
int rtw_leave_ips(struct rtw_dev *rtwdev)
{
int ret;
ret = rtw_ips_pwr_up(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to leave ips state\n");
return ret;
}
rtw_iterate_vifs_atomic(rtwdev, rtw_restore_port_cfg_iter, rtwdev);
return 0;
}
static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
conf->state = RTW_ALL_ON;
conf->awake_interval = 1;
conf->rlbm = 0;
conf->smart_ps = 0;
rtw_fw_set_pwr_mode(rtwdev);
rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS);
}
static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
conf->state = RTW_RF_OFF;
conf->awake_interval = 1;
conf->rlbm = 1;
conf->smart_ps = 2;
rtw_fw_set_pwr_mode(rtwdev);
rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS);
}
void rtw_lps_work(struct work_struct *work)
{
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
lps_work.work);
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
struct rtw_vif *rtwvif = conf->rtwvif;
if (WARN_ON(!rtwvif))
return;
if (conf->mode == RTW_MODE_LPS)
rtw_enter_lps_core(rtwdev);
else
rtw_leave_lps_core(rtwdev);
}
void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
if (rtwvif->in_lps)
return;
conf->mode = RTW_MODE_LPS;
conf->rtwvif = rtwvif;
rtwvif->in_lps = true;
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
}
void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
if (!rtwvif->in_lps)
return;
conf->mode = RTW_MODE_ACTIVE;
conf->rtwvif = rtwvif;
rtwvif->in_lps = false;
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
}
bool rtw_in_lps(struct rtw_dev *rtwdev)
{
return rtw_flag_check(rtwdev, RTW_FLAG_LEISURE_PS);
}
void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
if (WARN_ON(!rtwvif))
return;
if (rtwvif->in_lps)
return;
conf->mode = RTW_MODE_LPS;
conf->rtwvif = rtwvif;
rtwvif->in_lps = true;
rtw_enter_lps_core(rtwdev);
}
void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
{
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
if (WARN_ON(!rtwvif))
return;
if (!rtwvif->in_lps)
return;
conf->mode = RTW_MODE_ACTIVE;
conf->rtwvif = rtwvif;
rtwvif->in_lps = false;
rtw_leave_lps_core(rtwdev);
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_PS_H_
#define __RTW_PS_H_
#define RTW_LPS_THRESHOLD 2
int rtw_enter_ips(struct rtw_dev *rtwdev);
int rtw_leave_ips(struct rtw_dev *rtwdev);
void rtw_lps_work(struct work_struct *work);
void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
bool rtw_in_lps(struct rtw_dev *rtwdev);
#endif
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_REG_DEF_H__
#define __RTW_REG_DEF_H__
#define REG_SYS_FUNC_EN 0x0002
#define BIT_FEN_CPUEN BIT(2)
#define BIT_FEN_BB_GLB_RST BIT(1)
#define BIT_FEN_BB_RSTB BIT(0)
#define REG_SYS_PW_CTRL 0x0004
#define REG_SYS_CLK_CTRL 0x0008
#define BIT_CPU_CLK_EN BIT(14)
#define REG_RSV_CTRL 0x001C
#define DISABLE_PI 0x3
#define ENABLE_PI 0x2
#define BITS_RFC_DIRECT (BIT(31) | BIT(30))
#define BIT_WLMCU_IOIF BIT(0)
#define REG_RF_CTRL 0x001F
#define BIT_RF_SDM_RSTB BIT(2)
#define BIT_RF_RSTB BIT(1)
#define BIT_RF_EN BIT(0)
#define REG_AFE_CTRL1 0x0024
#define BIT_MAC_CLK_SEL (BIT(20) | BIT(21))
#define REG_EFUSE_CTRL 0x0030
#define BIT_EF_FLAG BIT(31)
#define BIT_SHIFT_EF_ADDR 8
#define BIT_MASK_EF_ADDR 0x3ff
#define BIT_MASK_EF_DATA 0xff
#define BITS_EF_ADDR (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR)
#define REG_LDO_EFUSE_CTRL 0x0034
#define BIT_MASK_EFUSE_BANK_SEL (BIT(8) | BIT(9))
#define REG_GPIO_MUXCFG 0x0040
#define BIT_FSPI_EN BIT(19)
#define BIT_WLRFE_4_5_EN BIT(2)
#define REG_LED_CFG 0x004C
#define BIT_LNAON_SEL_EN BIT(26)
#define BIT_PAPE_SEL_EN BIT(25)
#define REG_PAD_CTRL1 0x0064
#define BIT_PAPE_WLBT_SEL BIT(29)
#define BIT_LNAON_WLBT_SEL BIT(28)
#define REG_WL_BT_PWR_CTRL 0x0068
#define BIT_BT_FUNC_EN BIT(18)
#define BIT_BT_DIG_CLK_EN BIT(8)
#define REG_HCI_OPT_CTRL 0x0074
#define REG_MCUFW_CTRL 0x0080
#define BIT_ANA_PORT_EN BIT(22)
#define BIT_MAC_PORT_EN BIT(21)
#define BIT_BOOT_FSPI_EN BIT(20)
#define BIT_FW_INIT_RDY BIT(15)
#define BIT_FW_DW_RDY BIT(14)
#define BIT_RPWM_TOGGLE BIT(7)
#define BIT_DMEM_CHKSUM_OK BIT(6)
#define BIT_DMEM_DW_OK BIT(5)
#define BIT_IMEM_CHKSUM_OK BIT(4)
#define BIT_IMEM_DW_OK BIT(3)
#define BIT_IMEM_BOOT_LOAD_CHECKSUM_OK BIT(2)
#define BIT_MCUFWDL_EN BIT(0)
#define BIT_CHECK_SUM_OK (BIT(4) | BIT(6))
#define FW_READY (BIT_FW_INIT_RDY | BIT_FW_DW_RDY | \
BIT_IMEM_DW_OK | BIT_DMEM_DW_OK | \
BIT_CHECK_SUM_OK)
#define FW_READY_MASK 0xffff
#define REG_WLRF1 0x00EC
#define REG_SYS_CFG1 0x00F0
#define BIT_RTL_ID BIT(23)
#define BIT_RF_TYPE_ID BIT(27)
#define BIT_SHIFT_VENDOR_ID 16
#define BIT_MASK_VENDOR_ID 0xf
#define BIT_VENDOR_ID(x) (((x) & BIT_MASK_VENDOR_ID) << BIT_SHIFT_VENDOR_ID)
#define BITS_VENDOR_ID (BIT_MASK_VENDOR_ID << BIT_SHIFT_VENDOR_ID)
#define BIT_CLEAR_VENDOR_ID(x) ((x) & (~BITS_VENDOR_ID))
#define BIT_GET_VENDOR_ID(x) (((x) >> BIT_SHIFT_VENDOR_ID) & BIT_MASK_VENDOR_ID)
#define BIT_SHIFT_CHIP_VER 12
#define BIT_MASK_CHIP_VER 0xf
#define BIT_CHIP_VER(x) (((x) & BIT_MASK_CHIP_VER) << BIT_SHIFT_CHIP_VER)
#define BITS_CHIP_VER (BIT_MASK_CHIP_VER << BIT_SHIFT_CHIP_VER)
#define BIT_CLEAR_CHIP_VER(x) ((x) & (~BITS_CHIP_VER))
#define BIT_GET_CHIP_VER(x) (((x) >> BIT_SHIFT_CHIP_VER) & BIT_MASK_CHIP_VER)
#define REG_SYS_STATUS1 0x00F4
#define REG_SYS_STATUS2 0x00F8
#define REG_SYS_CFG2 0x00FC
#define REG_WLRF1 0x00EC
#define BIT_WLRF1_BBRF_EN (BIT(24) | BIT(25) | BIT(26))
#define REG_CR 0x0100
#define BIT_32K_CAL_TMR_EN BIT(10)
#define BIT_MAC_SEC_EN BIT(9)
#define BIT_ENSWBCN BIT(8)
#define BIT_MACRXEN BIT(7)
#define BIT_MACTXEN BIT(6)
#define BIT_SCHEDULE_EN BIT(5)
#define BIT_PROTOCOL_EN BIT(4)
#define BIT_RXDMA_EN BIT(3)
#define BIT_TXDMA_EN BIT(2)
#define BIT_HCI_RXDMA_EN BIT(1)
#define BIT_HCI_TXDMA_EN BIT(0)
#define MAC_TRX_ENABLE (BIT_HCI_TXDMA_EN | BIT_HCI_RXDMA_EN | BIT_TXDMA_EN | \
BIT_RXDMA_EN | BIT_PROTOCOL_EN | BIT_SCHEDULE_EN | \
BIT_MACTXEN | BIT_MACRXEN)
#define BIT_SHIFT_TXDMA_VOQ_MAP 4
#define BIT_MASK_TXDMA_VOQ_MAP 0x3
#define BIT_TXDMA_VOQ_MAP(x) \
(((x) & BIT_MASK_TXDMA_VOQ_MAP) << BIT_SHIFT_TXDMA_VOQ_MAP)
#define BIT_SHIFT_TXDMA_VIQ_MAP 6
#define BIT_MASK_TXDMA_VIQ_MAP 0x3
#define BIT_TXDMA_VIQ_MAP(x) \
(((x) & BIT_MASK_TXDMA_VIQ_MAP) << BIT_SHIFT_TXDMA_VIQ_MAP)
#define REG_TXDMA_PQ_MAP 0x010C
#define BIT_SHIFT_TXDMA_BEQ_MAP 8
#define BIT_MASK_TXDMA_BEQ_MAP 0x3
#define BIT_TXDMA_BEQ_MAP(x) \
(((x) & BIT_MASK_TXDMA_BEQ_MAP) << BIT_SHIFT_TXDMA_BEQ_MAP)
#define BIT_SHIFT_TXDMA_BKQ_MAP 10
#define BIT_MASK_TXDMA_BKQ_MAP 0x3
#define BIT_TXDMA_BKQ_MAP(x) \
(((x) & BIT_MASK_TXDMA_BKQ_MAP) << BIT_SHIFT_TXDMA_BKQ_MAP)
#define BIT_SHIFT_TXDMA_MGQ_MAP 12
#define BIT_MASK_TXDMA_MGQ_MAP 0x3
#define BIT_TXDMA_MGQ_MAP(x) \
(((x) & BIT_MASK_TXDMA_MGQ_MAP) << BIT_SHIFT_TXDMA_MGQ_MAP)
#define BIT_SHIFT_TXDMA_HIQ_MAP 14
#define BIT_MASK_TXDMA_HIQ_MAP 0x3
#define BIT_TXDMA_HIQ_MAP(x) \
(((x) & BIT_MASK_TXDMA_HIQ_MAP) << BIT_SHIFT_TXDMA_HIQ_MAP)
#define BIT_SHIFT_TXSC_40M 4
#define BIT_MASK_TXSC_40M 0xf
#define BIT_TXSC_40M(x) \
(((x) & BIT_MASK_TXSC_40M) << BIT_SHIFT_TXSC_40M)
#define BIT_SHIFT_TXSC_20M 0
#define BIT_MASK_TXSC_20M 0xf
#define BIT_TXSC_20M(x) \
(((x) & BIT_MASK_TXSC_20M) << BIT_SHIFT_TXSC_20M)
#define BIT_SHIFT_MAC_CLK_SEL 20
#define MAC_CLK_HW_DEF_80M 0
#define MAC_CLK_HW_DEF_40M 1
#define MAC_CLK_HW_DEF_20M 2
#define MAC_CLK_SPEED 80
#define REG_CR 0x0100
#define REG_TRXFF_BNDY 0x0114
#define REG_RXFF_BNDY 0x011C
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_C2HEVT 0x01A0
#define REG_HMETFR 0x01CC
#define REG_HMEBOX0 0x01D0
#define REG_HMEBOX1 0x01D4
#define REG_HMEBOX2 0x01D8
#define REG_HMEBOX3 0x01DC
#define REG_HMEBOX0_EX 0x01F0
#define REG_HMEBOX1_EX 0x01F4
#define REG_HMEBOX2_EX 0x01F8
#define REG_HMEBOX3_EX 0x01FC
#define REG_FIFOPAGE_CTRL_2 0x0204
#define BIT_BCN_VALID_V1 BIT(15)
#define BIT_MASK_BCN_HEAD_1_V1 0xfff
#define REG_AUTO_LLT_V1 0x0208
#define BIT_AUTO_INIT_LLT_V1 BIT(0)
#define REG_TXDMA_OFFSET_CHK 0x020C
#define REG_TXDMA_STATUS 0x0210
#define BTI_PAGE_OVF BIT(2)
#define REG_RQPN_CTRL_1 0x0228
#define REG_RQPN_CTRL_2 0x022C
#define BIT_LD_RQPN BIT(31)
#define REG_FIFOPAGE_INFO_1 0x0230
#define REG_FIFOPAGE_INFO_2 0x0234
#define REG_FIFOPAGE_INFO_3 0x0238
#define REG_FIFOPAGE_INFO_4 0x023C
#define REG_FIFOPAGE_INFO_5 0x0240
#define REG_H2C_HEAD 0x0244
#define REG_H2C_TAIL 0x0248
#define REG_H2C_READ_ADDR 0x024C
#define REG_H2C_INFO 0x0254
#define REG_FWHW_TXQ_CTRL 0x0420
#define BIT_EN_BCNQ_DL BIT(22)
#define BIT_EN_WR_FREE_TAIL BIT(20)
#define REG_BCNQ_BDNY_V1 0x0424
#define REG_LIFETIME_EN 0x0426
#define BIT_BA_PARSER_EN BIT(5)
#define REG_SPEC_SIFS 0x0428
#define REG_DARFRC 0x0430
#define REG_DARFRCH 0x0434
#define REG_RARFRCH 0x043C
#define REG_ARFR0 0x0444
#define REG_ARFRH0 0x0448
#define REG_ARFR1_V1 0x044C
#define REG_ARFRH1_V1 0x0450
#define REG_CCK_CHECK 0x0454
#define BIT_CHECK_CCK_EN BIT(7)
#define REG_AMPDU_MAX_TIME_V1 0x0455
#define REG_BCNQ1_BDNY_V1 0x0456
#define REG_TX_HANG_CTRL 0x045E
#define BIT_EN_EOF_V1 BIT(2)
#define REG_DATA_SC 0x0483
#define REG_ARFR4 0x049C
#define REG_ARFRH4 0x04A0
#define REG_ARFR5 0x04A4
#define REG_ARFRH5 0x04A8
#define REG_SW_AMPDU_BURST_MODE_CTRL 0x04BC
#define BIT_PRE_TX_CMD BIT(6)
#define REG_PROT_MODE_CTRL 0x04C8
#define REG_BAR_MODE_CTRL 0x04CC
#define REG_PRECNT_CTRL 0x04E5
#define BIT_EN_PRECNT BIT(11)
#define REG_EDCA_VO_PARAM 0x0500
#define REG_EDCA_VI_PARAM 0x0504
#define REG_EDCA_BE_PARAM 0x0508
#define REG_EDCA_BK_PARAM 0x050C
#define REG_PIFS 0x0512
#define REG_SIFS 0x0514
#define BIT_SHIFT_SIFS_OFDM_CTX 8
#define BIT_SHIFT_SIFS_CCK_TRX 16
#define BIT_SHIFT_SIFS_OFDM_TRX 24
#define REG_SLOT 0x051B
#define REG_TX_PTCL_CTRL 0x0520
#define BIT_SIFS_BK_EN BIT(12)
#define REG_TXPAUSE 0x0522
#define REG_RD_CTRL 0x0524
#define BIT_DIS_TXOP_CFE BIT(10)
#define BIT_DIS_LSIG_CFE BIT(9)
#define BIT_DIS_STBC_CFE BIT(8)
#define REG_TBTT_PROHIBIT 0x0540
#define BIT_SHIFT_TBTT_HOLD_TIME_AP 8
#define REG_RD_NAV_NXT 0x0544
#define REG_BCN_CTRL 0x0550
#define BIT_DIS_TSF_UDT BIT(4)
#define BIT_EN_BCN_FUNCTION BIT(3)
#define REG_BCN_CTRL_CLINT0 0x0551
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
#define REG_USTIME_TSF 0x055C
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_MISC_CTRL 0x0577
#define BIT_EN_FREE_CNT BIT(3)
#define BIT_DIS_SECOND_CCA (BIT(0) | BIT(1))
#define REG_TIMER0_SRC_SEL 0x05B4
#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
#define REG_TCR 0x0604
#define REG_RCR 0x0608
#define BIT_APP_FCS BIT(31)
#define BIT_APP_MIC BIT(30)
#define BIT_APP_ICV BIT(29)
#define BIT_APP_PHYSTS BIT(28)
#define BIT_APP_BASSN BIT(27)
#define BIT_VHT_DACK BIT(26)
#define BIT_TCPOFLD_EN BIT(25)
#define BIT_ENMBID BIT(24)
#define BIT_LSIGEN BIT(23)
#define BIT_MFBEN BIT(22)
#define BIT_DISCHKPPDLLEN BIT(21)
#define BIT_PKTCTL_DLEN BIT(20)
#define BIT_TIM_PARSER_EN BIT(18)
#define BIT_BC_MD_EN BIT(17)
#define BIT_UC_MD_EN BIT(16)
#define BIT_RXSK_PERPKT BIT(15)
#define BIT_HTC_LOC_CTRL BIT(14)
#define BIT_RPFM_CAM_ENABLE BIT(12)
#define BIT_TA_BCN BIT(11)
#define BIT_DISDECMYPKT BIT(10)
#define BIT_AICV BIT(9)
#define BIT_ACRC32 BIT(8)
#define BIT_CBSSID_BCN BIT(7)
#define BIT_CBSSID_DATA BIT(6)
#define BIT_APWRMGT BIT(5)
#define BIT_ADD3 BIT(4)
#define BIT_AB BIT(3)
#define BIT_AM BIT(2)
#define BIT_APM BIT(1)
#define BIT_AAP BIT(0)
#define REG_RX_PKT_LIMIT 0x060C
#define REG_RX_DRVINFO_SZ 0x060F
#define BIT_APP_PHYSTS BIT(28)
#define REG_USTIME_EDCA 0x0638
#define REG_ACKTO_CCK 0x0639
#define REG_RESP_SIFS_CCK 0x063C
#define REG_RESP_SIFS_OFDM 0x063E
#define REG_ACKTO 0x0640
#define REG_EIFS 0x0642
#define REG_NAV_CTRL 0x0650
#define REG_WMAC_TRXPTCL_CTL 0x0668
#define BIT_RFMOD (BIT(7) | BIT(8))
#define BIT_RFMOD_80M BIT(8)
#define BIT_RFMOD_40M BIT(7)
#define REG_WMAC_TRXPTCL_CTL_H 0x066C
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
#define REG_BBPSF_CTRL 0x06DC
#define REG_WMAC_OPTION_FUNCTION 0x07D0
#define REG_WMAC_OPTION_FUNCTION_1 0x07D4
#define REG_ANAPAR_XTAL_0 0x1040
#define REG_CPU_DMEM_CON 0x1080
#define BIT_WL_PLATFORM_RST BIT(16)
#define BIT_WL_SECURITY_CLK BIT(15)
#define BIT_DDMA_EN BIT(8)
#define REG_H2C_PKT_READADDR 0x10D0
#define REG_H2C_PKT_WRITEADDR 0x10D4
#define REG_FW_DBG7 0x10FC
#define FW_KEY_MASK 0xffffff00
#define REG_CR_EXT 0x1100
#define REG_DDMA_CH0SA 0x1200
#define REG_DDMA_CH0DA 0x1204
#define REG_DDMA_CH0CTRL 0x1208
#define BIT_DDMACH0_OWN BIT(31)
#define BIT_DDMACH0_CHKSUM_EN BIT(29)
#define BIT_DDMACH0_CHKSUM_STS BIT(27)
#define BIT_DDMACH0_RESET_CHKSUM_STS BIT(25)
#define BIT_DDMACH0_CHKSUM_CONT BIT(24)
#define BIT_MASK_DDMACH0_DLEN 0x3ffff
#define REG_H2CQ_CSR 0x1330
#define BIT_H2CQ_FULL BIT(31)
#define REG_FAST_EDCA_VOVI_SETTING 0x1448
#define REG_FAST_EDCA_BEBK_SETTING 0x144C
#define REG_RXPSF_CTRL 0x1610
#define BIT_RXGCK_FIFOTHR_EN BIT(28)
#define BIT_SHIFT_RXGCK_VHT_FIFOTHR 26
#define BIT_MASK_RXGCK_VHT_FIFOTHR 0x3
#define BIT_RXGCK_VHT_FIFOTHR(x) \
(((x) & BIT_MASK_RXGCK_VHT_FIFOTHR) << BIT_SHIFT_RXGCK_VHT_FIFOTHR)
#define BITS_RXGCK_VHT_FIFOTHR \
(BIT_MASK_RXGCK_VHT_FIFOTHR << BIT_SHIFT_RXGCK_VHT_FIFOTHR)
#define BIT_SHIFT_RXGCK_HT_FIFOTHR 24
#define BIT_MASK_RXGCK_HT_FIFOTHR 0x3
#define BIT_RXGCK_HT_FIFOTHR(x) \
(((x) & BIT_MASK_RXGCK_HT_FIFOTHR) << BIT_SHIFT_RXGCK_HT_FIFOTHR)
#define BITS_RXGCK_HT_FIFOTHR \
(BIT_MASK_RXGCK_HT_FIFOTHR << BIT_SHIFT_RXGCK_HT_FIFOTHR)
#define BIT_SHIFT_RXGCK_OFDM_FIFOTHR 22
#define BIT_MASK_RXGCK_OFDM_FIFOTHR 0x3
#define BIT_RXGCK_OFDM_FIFOTHR(x) \
(((x) & BIT_MASK_RXGCK_OFDM_FIFOTHR) << BIT_SHIFT_RXGCK_OFDM_FIFOTHR)
#define BITS_RXGCK_OFDM_FIFOTHR \
(BIT_MASK_RXGCK_OFDM_FIFOTHR << BIT_SHIFT_RXGCK_OFDM_FIFOTHR)
#define BIT_SHIFT_RXGCK_CCK_FIFOTHR 20
#define BIT_MASK_RXGCK_CCK_FIFOTHR 0x3
#define BIT_RXGCK_CCK_FIFOTHR(x) \
(((x) & BIT_MASK_RXGCK_CCK_FIFOTHR) << BIT_SHIFT_RXGCK_CCK_FIFOTHR)
#define BITS_RXGCK_CCK_FIFOTHR \
(BIT_MASK_RXGCK_CCK_FIFOTHR << BIT_SHIFT_RXGCK_CCK_FIFOTHR)
#define BIT_RXGCK_OFDMCCA_EN BIT(16)
#define BIT_SHIFT_RXPSF_PKTLENTHR 13
#define BIT_MASK_RXPSF_PKTLENTHR 0x7
#define BIT_RXPSF_PKTLENTHR(x) \
(((x) & BIT_MASK_RXPSF_PKTLENTHR) << BIT_SHIFT_RXPSF_PKTLENTHR)
#define BITS_RXPSF_PKTLENTHR \
(BIT_MASK_RXPSF_PKTLENTHR << BIT_SHIFT_RXPSF_PKTLENTHR)
#define BIT_CLEAR_RXPSF_PKTLENTHR(x) ((x) & (~BITS_RXPSF_PKTLENTHR))
#define BIT_SET_RXPSF_PKTLENTHR(x, v) \
(BIT_CLEAR_RXPSF_PKTLENTHR(x) | BIT_RXPSF_PKTLENTHR(v))
#define BIT_RXPSF_CTRLEN BIT(12)
#define BIT_RXPSF_VHTCHKEN BIT(11)
#define BIT_RXPSF_HTCHKEN BIT(10)
#define BIT_RXPSF_OFDMCHKEN BIT(9)
#define BIT_RXPSF_CCKCHKEN BIT(8)
#define BIT_RXPSF_OFDMRST BIT(7)
#define BIT_RXPSF_CCKRST BIT(6)
#define BIT_RXPSF_MHCHKEN BIT(5)
#define BIT_RXPSF_CONT_ERRCHKEN BIT(4)
#define BIT_RXPSF_ALL_ERRCHKEN BIT(3)
#define BIT_SHIFT_RXPSF_ERRTHR 0
#define BIT_MASK_RXPSF_ERRTHR 0x7
#define BIT_RXPSF_ERRTHR(x) \
(((x) & BIT_MASK_RXPSF_ERRTHR) << BIT_SHIFT_RXPSF_ERRTHR)
#define BITS_RXPSF_ERRTHR (BIT_MASK_RXPSF_ERRTHR << BIT_SHIFT_RXPSF_ERRTHR)
#define BIT_CLEAR_RXPSF_ERRTHR(x) ((x) & (~BITS_RXPSF_ERRTHR))
#define BIT_GET_RXPSF_ERRTHR(x) \
(((x) >> BIT_SHIFT_RXPSF_ERRTHR) & BIT_MASK_RXPSF_ERRTHR)
#define BIT_SET_RXPSF_ERRTHR(x, v) \
(BIT_CLEAR_RXPSF_ERRTHR(x) | BIT_RXPSF_ERRTHR(v))
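/* Illustrative sketch (not part of the driver): the CLEAR/GET/SET helpers
 * above compose a read-modify-write of a register bit field. Assuming a
 * hypothetical current value of REG_RXPSF_CTRL held in 'val', updating the
 * 3-bit ERRTHR field to 5 would look roughly like:
 *
 *   u32 val = rtw_read32(rtwdev, REG_RXPSF_CTRL);
 *   val = BIT_SET_RXPSF_ERRTHR(val, 5);        // clear old field, insert 5
 *   rtw_write32(rtwdev, REG_RXPSF_CTRL, val);
 *
 * BIT_GET_RXPSF_ERRTHR(val) would then read back 5.
 */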
#define REG_RXPSF_TYPE_CTRL 0x1614
#define REG_GENERAL_OPTION 0x1664
#define BIT_DUMMY_FCS_READY_MASK_EN BIT(9)
#define REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1 0x1700
#define REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1 0x1704
#define REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1 0x1708
#define LTECOEX_READY BIT(29)
#define LTECOEX_ACCESS_CTRL REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1
#define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1
#define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1
#define RF_DTXLOK 0x08
#define RF_CFGCH 0x18
#define RF_LUTWA 0x33
#define RF_LUTWD1 0x3e
#define RF_LUTWD0 0x3f
#define RF_XTALX2 0xb8
#define RF_MALSEL 0xbe
#define RF_LUTDBG 0xdf
#define RF_LUTWE2 0xee
#define RF_LUTWE 0xef
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "regd.h"
#include "debug.h"
#include "phy.h"
#define COUNTRY_CHPLAN_ENT(_alpha2, _chplan, _txpwr_regd) \
{.alpha2 = (_alpha2), \
.chplan = (_chplan), \
.txpwr_regd = (_txpwr_regd) \
}
/* If country code is not correctly defined in efuse,
* use worldwide country code and txpwr regd.
*/
static const struct rtw_regulatory rtw_defined_chplan =
COUNTRY_CHPLAN_ENT("00", RTW_CHPLAN_REALTEK_DEFINE, RTW_REGD_WW);
static const struct rtw_regulatory all_chplan_map[] = {
COUNTRY_CHPLAN_ENT("AD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AF", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AO", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AR", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("AS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("AT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("AW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("AZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BB", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("BD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BO", RTW_CHPLAN_WORLD_FCC7, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("BR", RTW_CHPLAN_FCC2_FCC1, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("BS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("BZ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("CC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("CM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("CR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("CV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("CZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("DE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("DJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("DK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("DM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("DO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("DZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("EC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("EE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("EG", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("EH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ER", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ES", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ET", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("FI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("FJ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("FK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("FM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("FO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("FR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC1_FCC7, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("GE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GP", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GT", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("GU", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("GW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("GY", RTW_CHPLAN_FCC1_NCC3, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("HN", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("HR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("HT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("HU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ID", RTW_CHPLAN_ETSI1_ETSI12, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IL", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("IT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("JE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("JO", RTW_CHPLAN_WORLD_ETSI8, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("JP", RTW_CHPLAN_MKK1_MKK1, RTW_REGD_MKK),
COUNTRY_CHPLAN_ENT("KE", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC2, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KW", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("KY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("KZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("LI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("LY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MA", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ME", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MF", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("MG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MH", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("MK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ML", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MP", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("MQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MV", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MX", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("MZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NG", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("NL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("OM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PA", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("PE", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("PF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PK", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("PT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("PW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("PY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RU", RTW_CHPLAN_WORLD_ETSI14, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("RW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("SE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("SR", RTW_CHPLAN_FCC2_FCC17, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("ST", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("SV", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("SX", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("SZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("TW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("TZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("UA", RTW_CHPLAN_WORLD_ETSI3, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("UG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("US", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("UY", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("UZ", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("VA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("VC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("VE", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("VI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("VU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("WF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("WS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
COUNTRY_CHPLAN_ENT("YE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("YT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ZM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
COUNTRY_CHPLAN_ENT("ZW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
};
static void rtw_regd_apply_beaconing_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
enum nl80211_band band;
struct ieee80211_supported_band *sband;
const struct ieee80211_reg_rule *reg_rule;
struct ieee80211_channel *ch;
unsigned int i;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
sband = wiphy->bands[band];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
reg_rule = freq_reg_info(wiphy,
MHZ_TO_KHZ(ch->center_freq));
if (IS_ERR(reg_rule))
continue;
ch->flags &= ~IEEE80211_CHAN_DISABLED;
if (!(reg_rule->flags & NL80211_RRF_NO_IR))
ch->flags &= ~IEEE80211_CHAN_NO_IR;
}
}
}
static void rtw_regd_apply_hw_cap_flags(struct wiphy *wiphy)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct rtw_dev *rtwdev = hw->priv;
struct rtw_efuse *efuse = &rtwdev->efuse;
int i;
if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_80))
return;
sband = wiphy->bands[NL80211_BAND_2GHZ];
if (!sband)
goto out_5g;
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
ch->flags |= IEEE80211_CHAN_NO_80MHZ;
}
out_5g:
sband = wiphy->bands[NL80211_BAND_5GHZ];
if (!sband)
return;
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
ch->flags |= IEEE80211_CHAN_NO_80MHZ;
}
}
static void rtw_regd_apply_world_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
rtw_regd_apply_beaconing_flags(wiphy, initiator);
}
static struct rtw_regulatory rtw_regd_find_reg_by_name(char *alpha2)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(all_chplan_map); i++) {
if (!memcmp(all_chplan_map[i].alpha2, alpha2, 2))
return all_chplan_map[i];
}
return rtw_defined_chplan;
}
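/* Minimal usage sketch (illustrative only): a known alpha2 such as "US"
 * returns its entry from all_chplan_map, while an alpha2 missing from the
 * table (hypothetically "XX") falls back to rtw_defined_chplan, i.e. the
 * worldwide plan:
 *
 *   struct rtw_regulatory reg = rtw_regd_find_reg_by_name("US");
 *   // reg.chplan == RTW_CHPLAN_FCC2_FCC11, reg.txpwr_regd == RTW_REGD_FCC
 *   reg = rtw_regd_find_reg_by_name("XX");
 *   // reg.chplan == RTW_CHPLAN_REALTEK_DEFINE, reg.txpwr_regd == RTW_REGD_WW
 */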
static int rtw_regd_notifier_apply(struct rtw_dev *rtwdev,
struct wiphy *wiphy,
struct regulatory_request *request)
{
if (request->initiator == NL80211_REGDOM_SET_BY_USER)
return 0;
rtwdev->regd = rtw_regd_find_reg_by_name(request->alpha2);
rtw_regd_apply_world_flags(wiphy, request->initiator);
return 0;
}
static int
rtw_regd_init_wiphy(struct rtw_regulatory *reg, struct wiphy *wiphy,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
{
wiphy->reg_notifier = reg_notifier;
wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG;
wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
rtw_regd_apply_hw_cap_flags(wiphy);
return 0;
}
int rtw_regd_init(struct rtw_dev *rtwdev,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
{
struct wiphy *wiphy = rtwdev->hw->wiphy;
if (!wiphy)
return -EINVAL;
rtwdev->regd = rtw_regd_find_reg_by_name(rtwdev->efuse.country_code);
rtw_regd_init_wiphy(&rtwdev->regd, wiphy, reg_notifier);
return 0;
}
void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtw_dev *rtwdev = hw->priv;
struct rtw_hal *hal = &rtwdev->hal;
rtw_regd_notifier_apply(rtwdev, wiphy, request);
rtw_dbg(rtwdev, RTW_DBG_REGD,
"get alpha2 %c%c from initiator %d, mapping to chplan 0x%x, txregd %d\n",
request->alpha2[0], request->alpha2[1], request->initiator,
rtwdev->regd.chplan, rtwdev->regd.txpwr_regd);
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_REGD_H_
#define __RTW_REGD_H_
#define IEEE80211_CHAN_NO_IBSS IEEE80211_CHAN_NO_IR
#define IEEE80211_CHAN_PASSIVE_SCAN IEEE80211_CHAN_NO_IR
enum rtw_chplan_id {
RTW_CHPLAN_WORLD_ETSI1 = 0x26,
RTW_CHPLAN_MKK1_MKK1 = 0x27,
RTW_CHPLAN_IC1_IC2 = 0x2B,
RTW_CHPLAN_WORLD_CHILE1 = 0x2D,
RTW_CHPLAN_WORLD_FCC3 = 0x30,
RTW_CHPLAN_WORLD_FCC5 = 0x32,
RTW_CHPLAN_FCC1_FCC7 = 0x34,
RTW_CHPLAN_WORLD_ETSI3 = 0x36,
RTW_CHPLAN_ETSI1_ETSI12 = 0x3D,
RTW_CHPLAN_KCC1_KCC2 = 0x3E,
RTW_CHPLAN_ETSI1_ETSI4 = 0x42,
RTW_CHPLAN_FCC1_NCC3 = 0x44,
RTW_CHPLAN_WORLD_ACMA1 = 0x45,
RTW_CHPLAN_WORLD_ETSI6 = 0x47,
RTW_CHPLAN_WORLD_ETSI7 = 0x48,
RTW_CHPLAN_WORLD_ETSI8 = 0x49,
RTW_CHPLAN_WORLD_ETSI10 = 0x51,
RTW_CHPLAN_WORLD_ETSI14 = 0x59,
RTW_CHPLAN_FCC2_FCC7 = 0x61,
RTW_CHPLAN_FCC2_FCC1 = 0x62,
RTW_CHPLAN_WORLD_FCC7 = 0x73,
RTW_CHPLAN_FCC2_FCC17 = 0x74,
RTW_CHPLAN_WORLD_ETSI20 = 0x75,
RTW_CHPLAN_FCC2_FCC11 = 0x76,
RTW_CHPLAN_REALTEK_DEFINE = 0x7f,
};
struct country_code_to_enum_rd {
u16 countrycode;
const char *iso_name;
};
enum country_code_type {
COUNTRY_CODE_FCC = 0,
COUNTRY_CODE_IC = 1,
COUNTRY_CODE_ETSI = 2,
COUNTRY_CODE_SPAIN = 3,
COUNTRY_CODE_FRANCE = 4,
COUNTRY_CODE_MKK = 5,
COUNTRY_CODE_MKK1 = 6,
COUNTRY_CODE_ISRAEL = 7,
COUNTRY_CODE_TELEC = 8,
COUNTRY_CODE_MIC = 9,
COUNTRY_CODE_GLOBAL_DOMAIN = 10,
COUNTRY_CODE_WORLD_WIDE_13 = 11,
COUNTRY_CODE_TELEC_NETGEAR = 12,
COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
/* new channel plan above this */
COUNTRY_CODE_MAX
};
int rtw_regd_init(struct rtw_dev *rtwdev,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request));
void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "fw.h"
#include "tx.h"
#include "rx.h"
#include "phy.h"
#include "rtw8822b.h"
#include "rtw8822b_table.h"
#include "mac.h"
#include "reg.h"
#include "debug.h"
static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
static void rtw8822be_efuse_parsing(struct rtw_efuse *efuse,
struct rtw8822b_efuse *map)
{
ether_addr_copy(efuse->addr, map->e.mac_addr);
}
static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw8822b_efuse *map;
int i;
map = (struct rtw8822b_efuse *)log_map;
efuse->rfe_option = map->rfe_option;
efuse->crystal_cap = map->xtal_k;
efuse->pa_type_2g = map->pa_type;
efuse->pa_type_5g = map->pa_type;
efuse->lna_type_2g = map->lna_type_2g[0];
efuse->lna_type_5g = map->lna_type_5g[0];
efuse->channel_plan = map->channel_plan;
efuse->country_code[0] = map->country_code[0];
efuse->country_code[1] = map->country_code[1];
efuse->bt_setting = map->rf_bt_setting;
efuse->regd = map->rf_board_option & 0x7;
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtw8822be_efuse_parsing(efuse, map);
break;
default:
/* unsupported now */
return -ENOTSUPP;
}
return 0;
}
static void rtw8822b_phy_rfe_init(struct rtw_dev *rtwdev)
{
/* chip top mux */
rtw_write32_mask(rtwdev, 0x64, BIT(29) | BIT(28), 0x3);
rtw_write32_mask(rtwdev, 0x4c, BIT(26) | BIT(25), 0x0);
rtw_write32_mask(rtwdev, 0x40, BIT(2), 0x1);
/* from s0 or s1 */
rtw_write32_mask(rtwdev, 0x1990, 0x3f, 0x30);
rtw_write32_mask(rtwdev, 0x1990, (BIT(11) | BIT(10)), 0x3);
/* input or output */
rtw_write32_mask(rtwdev, 0x974, 0x3f, 0x3f);
rtw_write32_mask(rtwdev, 0x974, (BIT(11) | BIT(10)), 0x3);
}
static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
u8 crystal_cap;
bool is_tx2_path;
/* power on BB/RF domain */
rtw_write8_set(rtwdev, REG_SYS_FUNC_EN,
BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
rtw_write8_set(rtwdev, REG_RF_CTRL,
BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
rtw_write32_set(rtwdev, REG_WLRF1, BIT_WLRF1_BBRF_EN);
/* pre init before header files config */
rtw_write32_clr(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
rtw_phy_load_tables(rtwdev);
crystal_cap = rtwdev->efuse.crystal_cap & 0x3F;
rtw_write32_mask(rtwdev, 0x24, 0x7e000000, crystal_cap);
rtw_write32_mask(rtwdev, 0x28, 0x7e, crystal_cap);
/* post init after header files config */
rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
is_tx2_path = false;
rtw8822b_config_trx_mode(rtwdev, hal->antenna_tx, hal->antenna_rx,
is_tx2_path);
rtw_phy_init(rtwdev);
rtw8822b_phy_rfe_init(rtwdev);
/* wifi path controller */
rtw_write32_mask(rtwdev, 0x70, 0x4000000, 1);
/* BB control */
rtw_write32_mask(rtwdev, 0x4c, 0x01800000, 0x2);
/* antenna mux switch */
rtw_write8(rtwdev, 0x974, 0xff);
rtw_write32_mask(rtwdev, 0x1990, 0x300, 0);
rtw_write32_mask(rtwdev, 0xcbc, 0x80000, 0x0);
/* SW control */
rtw_write8(rtwdev, 0xcb4, 0x77);
/* switch to WL side controller and gnt_wl gnt_bt debug signal */
rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
/* gnt_wl = 1, gnt_bt = 0 */
rtw_write32(rtwdev, 0x1704, 0x7700);
rtw_write32(rtwdev, 0x1700, 0xc00f0038);
/* switch for WL 2G */
rtw_write8(rtwdev, 0xcbd, 0x2);
}
#define WLAN_SLOT_TIME 0x09
#define WLAN_PIFS_TIME 0x19
#define WLAN_SIFS_CCK_CONT_TX 0xA
#define WLAN_SIFS_OFDM_CONT_TX 0xE
#define WLAN_SIFS_CCK_TRX 0x10
#define WLAN_SIFS_OFDM_TRX 0x10
#define WLAN_VO_TXOP_LIMIT 0x186 /* unit : 32us */
#define WLAN_VI_TXOP_LIMIT 0x3BC /* unit : 32us */
#define WLAN_RDG_NAV 0x05
#define WLAN_TXOP_NAV 0x1B
#define WLAN_CCK_RX_TSF 0x30
#define WLAN_OFDM_RX_TSF 0x30
#define WLAN_TBTT_PROHIBIT 0x04 /* unit : 32us */
#define WLAN_TBTT_HOLD_TIME 0x064 /* unit : 32us */
#define WLAN_DRV_EARLY_INT 0x04
#define WLAN_BCN_DMA_TIME 0x02
#define WLAN_RX_FILTER0 0x0FFFFFFF
#define WLAN_RX_FILTER2 0xFFFF
#define WLAN_RCR_CFG 0xE400220E
#define WLAN_RXPKT_MAX_SZ 12288
#define WLAN_RXPKT_MAX_SZ_512 (WLAN_RXPKT_MAX_SZ >> 9)
#define WLAN_AMPDU_MAX_TIME 0x70
#define WLAN_RTS_LEN_TH 0xFF
#define WLAN_RTS_TX_TIME_TH 0x08
#define WLAN_MAX_AGG_PKT_LIMIT 0x20
#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
#define FAST_EDCA_VO_TH 0x06
#define FAST_EDCA_VI_TH 0x06
#define FAST_EDCA_BE_TH 0x06
#define FAST_EDCA_BK_TH 0x06
#define WLAN_BAR_RETRY_LIMIT 0x01
#define WLAN_RA_TRY_RATE_AGG_LIMIT 0x08
#define WLAN_TX_FUNC_CFG1 0x30
#define WLAN_TX_FUNC_CFG2 0x30
#define WLAN_MAC_OPT_NORM_FUNC1 0x98
#define WLAN_MAC_OPT_LB_FUNC1 0x80
#define WLAN_MAC_OPT_FUNC2 0x30810041
#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
(WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
(WLAN_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX) | \
(WLAN_SIFS_OFDM_TRX << BIT_SHIFT_SIFS_OFDM_TRX))
#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\
(WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP))
#define WLAN_NAV_CFG (WLAN_RDG_NAV | (WLAN_TXOP_NAV << 16))
#define WLAN_RX_TSF_CFG (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF << 8))
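/* Worked example (illustrative): WLAN_SIFS_CFG packs the four SIFS timings
 * above into one 32-bit value, one byte per field. With the constants
 * defined here it expands to:
 *
 *   0x0A         (CCK continuous TX)
 *   0x0E <<  8   (OFDM continuous TX)
 *   0x10 << 16   (CCK TRX)
 *   0x10 << 24   (OFDM TRX)
 *   = 0x10100E0A, written to REG_SIFS in rtw8822b_mac_init() below.
 *
 * WLAN_TBTT_TIME similarly packs the TBTT prohibit time (0x04) with the
 * hold time (0x64 << 8), giving 0x6404 for REG_TBTT_PROHIBIT.
 */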
static int rtw8822b_mac_init(struct rtw_dev *rtwdev)
{
u32 value32;
/* protocol configuration */
rtw_write8_clr(rtwdev, REG_SW_AMPDU_BURST_MODE_CTRL, BIT_PRE_TX_CMD);
rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, WLAN_AMPDU_MAX_TIME);
rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_EOF_V1);
value32 = WLAN_RTS_LEN_TH | (WLAN_RTS_TX_TIME_TH << 8) |
(WLAN_MAX_AGG_PKT_LIMIT << 16) |
(WLAN_RTS_MAX_AGG_PKT_LIMIT << 24);
rtw_write32(rtwdev, REG_PROT_MODE_CTRL, value32);
rtw_write16(rtwdev, REG_BAR_MODE_CTRL + 2,
WLAN_BAR_RETRY_LIMIT | WLAN_RA_TRY_RATE_AGG_LIMIT << 8);
rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING, FAST_EDCA_VO_TH);
rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING + 2, FAST_EDCA_VI_TH);
rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING, FAST_EDCA_BE_TH);
rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING + 2, FAST_EDCA_BK_TH);
/* EDCA configuration */
rtw_write8_clr(rtwdev, REG_TIMER0_SRC_SEL, BIT_TSFT_SEL_TIMER0);
rtw_write16(rtwdev, REG_TXPAUSE, 0x0000);
rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_TIME);
rtw_write32(rtwdev, REG_SIFS, WLAN_SIFS_CFG);
rtw_write16(rtwdev, REG_EDCA_VO_PARAM + 2, WLAN_VO_TXOP_LIMIT);
rtw_write16(rtwdev, REG_EDCA_VI_PARAM + 2, WLAN_VI_TXOP_LIMIT);
rtw_write32(rtwdev, REG_RD_NAV_NXT, WLAN_NAV_CFG);
rtw_write16(rtwdev, REG_RXTSF_OFFSET_CCK, WLAN_RX_TSF_CFG);
/* Set beacon control - enable TSF and other related functions */
rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
/* Set send beacon related registers */
rtw_write32(rtwdev, REG_TBTT_PROHIBIT, WLAN_TBTT_TIME);
rtw_write8(rtwdev, REG_DRVERLYINT, WLAN_DRV_EARLY_INT);
rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
rtw_write8_clr(rtwdev, REG_TX_PTCL_CTRL + 1, BIT_SIFS_BK_EN >> 8);
/* WMAC configuration */
rtw_write32(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RXPKT_MAX_SZ_512);
rtw_write8(rtwdev, REG_TCR + 2, WLAN_TX_FUNC_CFG2);
rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1);
return 0;
}
static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_hal *hal = &rtwdev->hal;
bool is_channel_2g = (channel <= 14) ? true : false;
if (is_channel_2g) {
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x705770);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(4), 0);
} else {
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x177517);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x75);
rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(5), 0);
}
rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
if (hal->antenna_rx == BB_PATH_AB ||
hal->antenna_tx == BB_PATH_AB) {
/* 2TX or 2RX */
rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa501);
} else if (hal->antenna_rx == hal->antenna_tx) {
/* TXA+RXA or TXB+RXB */
rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa500);
} else {
/* TXB+RXA or TXA+RXB */
rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa005);
}
}
static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
{
struct rtw_hal *hal = &rtwdev->hal;
bool is_channel_2g = (channel <= 14) ? true : false;
if (is_channel_2g) {
/* signal source */
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x745774);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
} else {
/* signal source */
rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x477547);
rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x75);
}
rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
if (is_channel_2g) {
if (hal->antenna_rx == BB_PATH_AB ||
hal->antenna_tx == BB_PATH_AB) {
/* 2TX or 2RX */
rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa501);
} else if (hal->antenna_rx == hal->antenna_tx) {
/* TXA+RXA or TXB+RXB */
rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa500);
} else {
/* TXB+RXA or TXA+RXB */
rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa005);
}
} else {
rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa5a5);
}
}
enum {
CCUT_IDX_1R_2G,
CCUT_IDX_2R_2G,
CCUT_IDX_1R_5G,
CCUT_IDX_2R_5G,
CCUT_IDX_NR,
};
struct cca_ccut {
u32 reg82c[CCUT_IDX_NR];
u32 reg830[CCUT_IDX_NR];
u32 reg838[CCUT_IDX_NR];
};
static const struct cca_ccut cca_ifem_ccut = {
{0x75C97010, 0x75C97010, 0x75C97010, 0x75C97010}, /*Reg82C*/
{0x79a0eaaa, 0x79A0EAAC, 0x79a0eaaa, 0x79a0eaaa}, /*Reg830*/
{0x87765541, 0x87746341, 0x87765541, 0x87746341}, /*Reg838*/
};
static const struct cca_ccut cca_efem_ccut = {
{0x75B86010, 0x75B76010, 0x75B86010, 0x75B76010}, /*Reg82C*/
{0x79A0EAA8, 0x79A0EAAC, 0x79A0EAA8, 0x79a0eaaa}, /*Reg830*/
{0x87766451, 0x87766431, 0x87766451, 0x87766431}, /*Reg838*/
};
static const struct cca_ccut cca_ifem_ccut_ext = {
{0x75da8010, 0x75da8010, 0x75da8010, 0x75da8010}, /*Reg82C*/
{0x79a0eaaa, 0x97A0EAAC, 0x79a0eaaa, 0x79a0eaaa}, /*Reg830*/
{0x87765541, 0x86666341, 0x87765561, 0x86666361}, /*Reg838*/
};
static void rtw8822b_get_cca_val(const struct cca_ccut *cca_ccut, u8 col,
u32 *reg82c, u32 *reg830, u32 *reg838)
{
*reg82c = cca_ccut->reg82c[col];
*reg830 = cca_ccut->reg830[col];
*reg838 = cca_ccut->reg838[col];
}
struct rtw8822b_rfe_info {
const struct cca_ccut *cca_ccut_2g;
const struct cca_ccut *cca_ccut_5g;
enum rtw_rfe_fem fem;
bool ifem_ext;
void (*rtw_set_channel_rfe)(struct rtw_dev *rtwdev, u8 channel);
};
#define I2GE5G_CCUT(set_ch) { \
.cca_ccut_2g = &cca_ifem_ccut, \
.cca_ccut_5g = &cca_efem_ccut, \
.fem = RTW_RFE_IFEM2G_EFEM5G, \
.ifem_ext = false, \
.rtw_set_channel_rfe = &rtw8822b_set_channel_rfe_ ## set_ch, \
}
#define IFEM_EXT_CCUT(set_ch) { \
.cca_ccut_2g = &cca_ifem_ccut_ext, \
.cca_ccut_5g = &cca_ifem_ccut_ext, \
.fem = RTW_RFE_IFEM, \
.ifem_ext = true, \
.rtw_set_channel_rfe = &rtw8822b_set_channel_rfe_ ## set_ch, \
}
static const struct rtw8822b_rfe_info rtw8822b_rfe_info[] = {
[2] = I2GE5G_CCUT(efem),
[5] = IFEM_EXT_CCUT(ifem),
};
static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
const struct rtw8822b_rfe_info *rfe_info)
{
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
const struct cca_ccut *cca_ccut;
u8 col;
u32 reg82c, reg830, reg838;
bool is_efem_cca = false, is_ifem_cca = false, is_rfe_type = false;
if (channel <= 14) {
cca_ccut = rfe_info->cca_ccut_2g;
if (hal->antenna_rx == BB_PATH_A ||
hal->antenna_rx == BB_PATH_B)
col = CCUT_IDX_1R_2G;
else
col = CCUT_IDX_2R_2G;
} else {
cca_ccut = rfe_info->cca_ccut_5g;
if (hal->antenna_rx == BB_PATH_A ||
hal->antenna_rx == BB_PATH_B)
col = CCUT_IDX_1R_5G;
else
col = CCUT_IDX_2R_5G;
}
rtw8822b_get_cca_val(cca_ccut, col, &reg82c, &reg830, &reg838);
switch (rfe_info->fem) {
case RTW_RFE_IFEM:
default:
is_ifem_cca = true;
if (rfe_info->ifem_ext)
is_rfe_type = true;
break;
case RTW_RFE_EFEM:
is_efem_cca = true;
break;
case RTW_RFE_IFEM2G_EFEM5G:
if (channel <= 14)
is_ifem_cca = true;
else
is_efem_cca = true;
break;
}
if (is_ifem_cca) {
if ((hal->cut_version == RTW_CHIP_VER_CUT_B &&
(col == CCUT_IDX_2R_2G || col == CCUT_IDX_2R_5G) &&
bw == RTW_CHANNEL_WIDTH_40) ||
(!is_rfe_type && col == CCUT_IDX_2R_5G &&
bw == RTW_CHANNEL_WIDTH_40) ||
(efuse->rfe_option == 5 && col == CCUT_IDX_2R_5G))
reg830 = 0x79a0ea28;
}
rtw_write32_mask(rtwdev, REG_CCASEL, MASKDWORD, reg82c);
rtw_write32_mask(rtwdev, REG_PDMFTH, MASKDWORD, reg830);
rtw_write32_mask(rtwdev, REG_CCA2ND, MASKDWORD, reg838);
if (is_efem_cca && !(hal->cut_version == RTW_CHIP_VER_CUT_B))
rtw_write32_mask(rtwdev, REG_L1WT, MASKDWORD, 0x9194b2b9);
if (bw == RTW_CHANNEL_WIDTH_20 &&
((channel >= 52 && channel <= 64) ||
(channel >= 100 && channel <= 144)))
rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf0, 0x4);
}
static const u8 low_band[15] = {0x7, 0x6, 0x6, 0x5, 0x0, 0x0, 0x7, 0xff, 0x6,
0x5, 0x0, 0x0, 0x7, 0x6, 0x6};
static const u8 middle_band[23] = {0x6, 0x5, 0x0, 0x0, 0x7, 0x6, 0x6, 0xff, 0x0,
0x0, 0x7, 0x6, 0x6, 0x5, 0x0, 0xff, 0x7, 0x6,
0x6, 0x5, 0x0, 0x0, 0x7};
static const u8 high_band[15] = {0x5, 0x5, 0x0, 0x7, 0x7, 0x6, 0x5, 0xff, 0x0,
0x7, 0x7, 0x6, 0x5, 0x5, 0x0};
static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
{
#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
#define RF18_BAND_2G (0)
#define RF18_BAND_5G (BIT(16) | BIT(8))
#define RF18_CHANNEL_MASK (MASKBYTE0)
#define RF18_RFSI_MASK (BIT(18) | BIT(17))
#define RF18_RFSI_GE_CH80 (BIT(17))
#define RF18_RFSI_GT_CH144 (BIT(18))
#define RF18_BW_MASK (BIT(11) | BIT(10))
#define RF18_BW_20M (BIT(11) | BIT(10))
#define RF18_BW_40M (BIT(11))
#define RF18_BW_80M (BIT(10))
#define RFBE_MASK (BIT(17) | BIT(16) | BIT(15))
struct rtw_hal *hal = &rtwdev->hal;
u32 rf_reg18, rf_reg_be;
rf_reg18 = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK);
rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
RF18_BW_MASK);
rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
rf_reg18 |= (channel & RF18_CHANNEL_MASK);
if (channel > 144)
rf_reg18 |= RF18_RFSI_GT_CH144;
else if (channel >= 80)
rf_reg18 |= RF18_RFSI_GE_CH80;
switch (bw) {
case RTW_CHANNEL_WIDTH_5:
case RTW_CHANNEL_WIDTH_10:
case RTW_CHANNEL_WIDTH_20:
default:
rf_reg18 |= RF18_BW_20M;
break;
case RTW_CHANNEL_WIDTH_40:
rf_reg18 |= RF18_BW_40M;
break;
case RTW_CHANNEL_WIDTH_80:
rf_reg18 |= RF18_BW_80M;
break;
}
if (channel <= 14)
rf_reg_be = 0x0;
else if (channel >= 36 && channel <= 64)
rf_reg_be = low_band[(channel - 36) >> 1];
else if (channel >= 100 && channel <= 144)
rf_reg_be = middle_band[(channel - 100) >> 1];
else if (channel >= 149 && channel <= 177)
rf_reg_be = high_band[(channel - 149) >> 1];
else
goto err;
rtw_write_rf(rtwdev, RF_PATH_A, RF_MALSEL, RFBE_MASK, rf_reg_be);
/* need to set 0xdf[18]=1 before writing RF18 when channel 144 */
if (channel == 144)
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(18), 0x1);
else
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(18), 0x0);
rtw_write_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK, rf_reg18);
if (hal->rf_type > RF_1T1R)
rtw_write_rf(rtwdev, RF_PATH_B, 0x18, RFREG_MASK, rf_reg18);
rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 0);
rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 1);
return;
err:
WARN_ON(1);
}
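/* Illustrative walk-through (not part of the driver): the low/middle/high
 * band tables above are indexed by half the channel offset within each
 * 5 GHz sub-band, e.g. channel 44 uses low_band[(44 - 36) >> 1] ==
 * low_band[4] == 0x0, and channel 149 uses high_band[(149 - 149) >> 1] ==
 * high_band[0] == 0x5 for the RF 0xbe (RF_MALSEL) band entry. The 0xff
 * entries appear to correspond to channel numbers that are never used and
 * therefore never indexed.
 */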
static void rtw8822b_toggle_igi(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
u32 igi;
igi = rtw_read32_mask(rtwdev, REG_RXIGI_A, 0x7f);
rtw_write32_mask(rtwdev, REG_RXIGI_A, 0x7f, igi - 2);
rtw_write32_mask(rtwdev, REG_RXIGI_A, 0x7f, igi);
rtw_write32_mask(rtwdev, REG_RXIGI_B, 0x7f, igi - 2);
rtw_write32_mask(rtwdev, REG_RXIGI_B, 0x7f, igi);
rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0, 0x0);
rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0,
hal->antenna_rx | (hal->antenna_rx << 4));
}
static void rtw8822b_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
{
if (bw == RTW_CHANNEL_WIDTH_40) {
/* RX DFIR for BW40 */
rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x1);
rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x0);
rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0);
} else if (bw == RTW_CHANNEL_WIDTH_80) {
/* RX DFIR for BW80 */
rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x1);
rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0);
} else {
/* RX DFIR for BW20, BW10 and BW5 */
rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x2);
rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x1);
}
}
static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 rfe_option = efuse->rfe_option;
u32 val32;
if (channel <= 14) {
rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x1);
rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x0);
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x0);
rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 15);
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x0);
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x96a);
if (channel == 14) {
rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x00006577);
rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000);
} else {
rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x384f6577);
rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x1525);
}
rtw_write32_mask(rtwdev, REG_RFEINV, 0x300, 0x2);
} else if (channel > 35) {
rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x1);
rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x0);
rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 34);
if (channel >= 36 && channel <= 64)
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x1);
else if (channel >= 100 && channel <= 144)
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x2);
else if (channel >= 149)
rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x3);
if (channel >= 36 && channel <= 48)
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x494);
else if (channel >= 52 && channel <= 64)
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x453);
else if (channel >= 100 && channel <= 116)
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x452);
else if (channel >= 118 && channel <= 177)
rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x412);
rtw_write32_mask(rtwdev, 0xcbc, 0x300, 0x1);
}
switch (bw) {
case RTW_CHANNEL_WIDTH_20:
default:
val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
val32 &= 0xFFCFFC00;
val32 |= (RTW_CHANNEL_WIDTH_20);
rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
break;
case RTW_CHANNEL_WIDTH_40:
if (primary_ch_idx == 1)
rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
else
rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
val32 &= 0xFF3FF300;
val32 |= (((primary_ch_idx & 0xf) << 2) | RTW_CHANNEL_WIDTH_40);
rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
break;
case RTW_CHANNEL_WIDTH_80:
val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
val32 &= 0xFCEFCF00;
val32 |= (((primary_ch_idx & 0xf) << 2) | RTW_CHANNEL_WIDTH_80);
rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
if (rfe_option == 2) {
rtw_write32_mask(rtwdev, REG_L1PKWT, 0x0000f000, 0x6);
rtw_write32_mask(rtwdev, REG_ADC40, BIT(10), 0x1);
}
break;
case RTW_CHANNEL_WIDTH_5:
val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
val32 &= 0xEFEEFE00;
val32 |= ((BIT(6) | RTW_CHANNEL_WIDTH_20));
rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0);
rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1);
break;
case RTW_CHANNEL_WIDTH_10:
val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
val32 &= 0xEFFEFF00;
val32 |= ((BIT(7) | RTW_CHANNEL_WIDTH_20));
rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0);
rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1);
break;
}
}
static void rtw8822b_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_chan_idx)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
const struct rtw8822b_rfe_info *rfe_info;
if (WARN(efuse->rfe_option >= ARRAY_SIZE(rtw8822b_rfe_info),
"rfe_option %d is out of boundary\n", efuse->rfe_option))
return;
rfe_info = &rtw8822b_rfe_info[efuse->rfe_option];
rtw8822b_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
rtw8822b_set_channel_rf(rtwdev, channel, bw);
rtw8822b_set_channel_rxdfir(rtwdev, bw);
rtw8822b_toggle_igi(rtwdev);
rtw8822b_set_channel_cca(rtwdev, channel, bw, rfe_info);
(*rfe_info->rtw_set_channel_rfe)(rtwdev, channel);
}
static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
const struct rtw8822b_rfe_info *rfe_info;
u8 ch = rtwdev->hal.current_channel;
u8 tx_path_sel, rx_path_sel;
int counter;
if (WARN(efuse->rfe_option >= ARRAY_SIZE(rtw8822b_rfe_info),
"rfe_option %d is out of boundary\n", efuse->rfe_option))
return;
rfe_info = &rtw8822b_rfe_info[efuse->rfe_option];
if ((tx_path | rx_path) & BB_PATH_A)
rtw_write32_mask(rtwdev, REG_AGCTR_A, MASKLWORD, 0x3231);
else
rtw_write32_mask(rtwdev, REG_AGCTR_A, MASKLWORD, 0x1111);
if ((tx_path | rx_path) & BB_PATH_B)
rtw_write32_mask(rtwdev, REG_AGCTR_B, MASKLWORD, 0x3231);
else
rtw_write32_mask(rtwdev, REG_AGCTR_B, MASKLWORD, 0x1111);
rtw_write32_mask(rtwdev, REG_CDDTXP, (BIT(19) | BIT(18)), 0x3);
rtw_write32_mask(rtwdev, REG_TXPSEL, (BIT(29) | BIT(28)), 0x1);
rtw_write32_mask(rtwdev, REG_TXPSEL, BIT(30), 0x1);
if (tx_path & BB_PATH_A) {
rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x001);
rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0x8);
} else if (tx_path & BB_PATH_B) {
rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x002);
rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0x4);
}
if (tx_path == BB_PATH_A || tx_path == BB_PATH_B)
rtw_write32_mask(rtwdev, REG_TXPSEL1, 0xfff0, 0x01);
else
rtw_write32_mask(rtwdev, REG_TXPSEL1, 0xfff0, 0x43);
tx_path_sel = (tx_path << 4) | tx_path;
rtw_write32_mask(rtwdev, REG_TXPSEL, MASKBYTE0, tx_path_sel);
if (tx_path != BB_PATH_A && tx_path != BB_PATH_B) {
if (is_tx2_path || rtwdev->mp_mode) {
rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x043);
rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0xc);
}
}
rtw_write32_mask(rtwdev, REG_RXDESC, BIT(22), 0x0);
rtw_write32_mask(rtwdev, REG_RXDESC, BIT(18), 0x0);
if (rx_path & BB_PATH_A)
rtw_write32_mask(rtwdev, REG_ADCINI, 0x0f000000, 0x0);
else if (rx_path & BB_PATH_B)
rtw_write32_mask(rtwdev, REG_ADCINI, 0x0f000000, 0x5);
rx_path_sel = (rx_path << 4) | rx_path;
rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0, rx_path_sel);
if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
rtw_write32_mask(rtwdev, REG_ANTWT, BIT(16), 0x0);
rtw_write32_mask(rtwdev, REG_HTSTFWT, BIT(28), 0x0);
rtw_write32_mask(rtwdev, REG_MRC, BIT(23), 0x0);
} else {
rtw_write32_mask(rtwdev, REG_ANTWT, BIT(16), 0x1);
rtw_write32_mask(rtwdev, REG_HTSTFWT, BIT(28), 0x1);
rtw_write32_mask(rtwdev, REG_MRC, BIT(23), 0x1);
}
for (counter = 100; counter > 0; counter--) {
u32 rf_reg33;
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00001);
udelay(2);
rf_reg33 = rtw_read_rf(rtwdev, RF_PATH_A, 0x33, RFREG_MASK);
if (rf_reg33 == 0x00001)
break;
}
if (WARN(counter <= 0, "failed to write RF mode table\n"))
return;
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00001);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x00034);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x4080c);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000);
rtw8822b_toggle_igi(rtwdev);
rtw8822b_set_channel_cca(rtwdev, 1, RTW_CHANNEL_WIDTH_20, rfe_info);
(*rfe_info->rtw_set_channel_rfe)(rtwdev, ch);
}
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
s8 min_rx_power = -120;
u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
pkt_stat->rx_power[RF_PATH_A] = pwdb - 110;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
min_rx_power);
}
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
u8 rxsc, bw;
s8 min_rx_power = -120;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
else
rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
if (rxsc >= 1 && rxsc <= 8)
bw = RTW_CHANNEL_WIDTH_20;
else if (rxsc >= 9 && rxsc <= 12)
bw = RTW_CHANNEL_WIDTH_40;
else if (rxsc >= 13)
bw = RTW_CHANNEL_WIDTH_80;
else
bw = GET_PHY_STAT_P1_RF_MODE(phy_status);
pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
pkt_stat->rx_power[RF_PATH_B] = GET_PHY_STAT_P1_PWDB_B(phy_status) - 110;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 2);
pkt_stat->bw = bw;
pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
pkt_stat->rx_power[RF_PATH_B],
min_rx_power);
}
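/* Note (illustrative, hypothetical value): PWDB readings from the PHY are
 * offset by 110, so a PWDB of 0x46 (70) maps to 70 - 110 = -40 dBm; the
 * reported signal_power is then clamped to no less than min_rx_power
 * (-120 dBm).
 */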
static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
u8 page;
page = *phy_status & 0xf;
switch (page) {
case 0:
query_phy_status_page0(rtwdev, phy_status, pkt_stat);
break;
case 1:
query_phy_status_page1(rtwdev, phy_status, pkt_stat);
break;
default:
rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
return;
}
}
static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_hdr *hdr;
u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
u8 *phy_status = NULL;
memset(pkt_stat, 0, sizeof(*pkt_stat));
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
pkt_stat->ppdu_cnt = GET_RX_DESC_PPDU_CNT(rx_desc);
pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
/* drv_info_sz is in unit of 8-bytes */
pkt_stat->drv_info_sz *= 8;
/* the rx/phy status of a c2h cmd pkt is of no interest */
if (pkt_stat->is_c2h)
return;
hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
pkt_stat->drv_info_sz);
if (pkt_stat->phy_status) {
phy_status = rx_desc + desc_sz + pkt_stat->shift;
query_phy_status(rtwdev, phy_status, pkt_stat);
}
rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
}
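/* Sketch of the rx buffer layout assumed above (not normative):
 *
 *   rx_desc           shift   driver info              frame
 *   |<-- desc_sz --->|<----->|<-- drv_info_sz ------->|<-- ieee80211_hdr ...
 *                            ^
 *                            phy_status starts here when the PHYST bit is set
 *
 * drv_info_sz is reported in 8-byte units and converted to bytes above;
 * the PHY status block, when present, sits at the front of the driver info
 * region and the MAC header follows the whole region.
 */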
static void
rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
{
struct rtw_hal *hal = &rtwdev->hal;
static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
static u32 phy_pwr_idx;
u8 rate, rate_idx, pwr_index, shift;
int j;
for (j = 0; j < rtw_rate_size[rs]; j++) {
rate = rtw_rate_section[rs][j];
pwr_index = hal->tx_pwr_tbl[path][rate];
shift = rate & 0x3;
phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
if (shift == 0x3) {
rate_idx = rate & 0xfc;
rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
phy_pwr_idx);
phy_pwr_idx = 0;
}
}
}
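/* Worked example (illustrative, hypothetical power indexes): TXAGC values
 * are written four rates at a time, one byte per rate. For path A and the
 * first four rates (rate values 0..3) with indexes 0x28, 0x29, 0x2a, 0x2b,
 * the loop accumulates
 *
 *   phy_pwr_idx = 0x28 | 0x29 << 8 | 0x2a << 16 | 0x2b << 24 = 0x2B2A2928
 *
 * and issues a single write at offset_txagc[0] + (rate & 0xfc), i.e.
 * 0x1d00 for rates 0..3.
 */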
static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
int rs, path;
for (path = 0; path < hal->rf_path_num; path++) {
for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs);
}
}
static bool rtw8822b_check_rf_path(u8 antenna)
{
switch (antenna) {
case BB_PATH_A:
case BB_PATH_B:
case BB_PATH_AB:
return true;
default:
return false;
}
}
static void rtw8822b_set_antenna(struct rtw_dev *rtwdev, u8 antenna_tx,
u8 antenna_rx)
{
struct rtw_hal *hal = &rtwdev->hal;
rtw_dbg(rtwdev, RTW_DBG_PHY, "config RF path, tx=0x%x rx=0x%x\n",
antenna_tx, antenna_rx);
if (!rtw8822b_check_rf_path(antenna_tx)) {
rtw_info(rtwdev, "unsupport tx path, set to default path ab\n");
antenna_tx = BB_PATH_AB;
}
if (!rtw8822b_check_rf_path(antenna_rx)) {
rtw_info(rtwdev, "unsupport rx path, set to default path ab\n");
antenna_rx = BB_PATH_AB;
}
hal->antenna_tx = antenna_tx;
hal->antenna_rx = antenna_rx;
rtw8822b_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
}
static void rtw8822b_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
{
u8 ldo_pwr;
ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
ldo_pwr = enable ? ldo_pwr | BIT(7) : ldo_pwr & ~BIT(7);
rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
}
static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 cck_enable;
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28);
cck_fa_cnt = rtw_read16(rtwdev, 0xa5c);
ofdm_fa_cnt = rtw_read16(rtwdev, 0xf48);
dm_info->cck_fa_cnt = cck_fa_cnt;
dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
dm_info->total_fa_cnt = ofdm_fa_cnt;
dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
rtw_write32_set(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0xa2c, BIT(15));
rtw_write32_set(rtwdev, 0xa2c, BIT(15));
rtw_write32_set(rtwdev, 0xb58, BIT(0));
rtw_write32_clr(rtwdev, 0xb58, BIT(0));
}
static void rtw8822b_do_iqk(struct rtw_dev *rtwdev)
{
static int do_iqk_cnt;
struct rtw_iqk_para para = {.clear = 0, .segment_iqk = 0};
u32 rf_reg, iqk_fail_mask;
int counter;
bool reload;
rtw_fw_do_iqk(rtwdev, &para);
for (counter = 0; counter < 300; counter++) {
rf_reg = rtw_read_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK);
if (rf_reg == 0xabcde)
break;
msleep(20);
}
rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0);
reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16));
iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0));
rtw_dbg(rtwdev, RTW_DBG_PHY,
"iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n",
counter, reload, ++do_iqk_cnt, iqk_fail_mask);
}
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
{0x004A,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3) | BIT(4) | BIT(7), 0},
{0x0300,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0x0301,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
0,
RTW_PWR_CMD_END, 0, 0},
};
static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
{0x0012,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), 0},
{0x0012,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0020,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0001,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS},
{0x0000,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
{0x0075,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0006,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
{0x0075,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0xFF1A,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0x0006,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(7), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
{0x10C3,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_POLLING, BIT(0), 0},
{0x0020,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
{0x10A8,
RTW_PWR_CUT_C_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0x10A9,
RTW_PWR_CUT_C_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0xef},
{0x10AA,
RTW_PWR_CUT_C_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0x0c},
{0x0068,
RTW_PWR_CUT_C_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
{0x0029,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0xF9},
{0x0024,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), 0},
{0x0074,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
{0x00AF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
0,
RTW_PWR_CMD_END, 0, 0},
};
static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
{0x0003,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), 0},
{0x0093,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3), 0},
{0x001F,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0x00EF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0xFF1A,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0x30},
{0x0049,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), 0},
{0x0006,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0002,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), 0},
{0x10C3,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_POLLING, BIT(1), 0},
{0x0020,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3), 0},
{0x0000,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
0,
RTW_PWR_CMD_END, 0, 0},
};
static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
{0x0007,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0x20},
{0x0067,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
{0x004A,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x0067,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), 0},
{0x0067,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(4), 0},
{0x004F,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x0067,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), 0},
{0x0046,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
{0x0067,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), 0},
{0x0046,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
{0x0062,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
{0x0081,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(7) | BIT(6), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_POLLING, BIT(1), 0},
{0x0090,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), 0},
{0x0044,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0x0040,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_WRITE, 0xFF, 0x90},
{0x0041,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_WRITE, 0xFF, 0x00},
{0x0042,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_WRITE, 0xFF, 0x04},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
0,
RTW_PWR_CMD_END, 0, 0},
};
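/* Card enable runs the carddis->cardemu and cardemu->act sub-sequences in
 * order; card disable runs act->cardemu and then cardemu->carddis.  The
 * NULL entry terminates each list.
 */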
static struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
trans_carddis_to_cardemu_8822b,
trans_cardemu_to_act_8822b,
NULL
};
static struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
trans_act_to_cardemu_8822b,
trans_cardemu_to_carddis_8822b,
NULL
};
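/* Interface PHY parameters: {addr, value} pairs programmed into the
 * USB2/USB3 and PCIe gen1/gen2 interface PHYs for the chip cuts and
 * platforms listed; an entry with address 0xFFFF terminates each table.
 */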
static struct rtw_intf_phy_para usb2_param_8822b[] = {
{0xFFFF, 0x00,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
static struct rtw_intf_phy_para usb3_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_D,
RTW_INTF_PHY_PLATFORM_ALL},
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0002, 0x60C6,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0008, 0x3596,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0009, 0x321C,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x000A, 0x9623,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0020, 0x94FF,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0021, 0xFFCF,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0026, 0xC006,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0029, 0xFF0E,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x002A, 0x1840,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
{0x0001, 0xA841,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0002, 0x60C6,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0008, 0x3597,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0009, 0x321C,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x000A, 0x9623,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0020, 0x94FF,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0021, 0xFFCF,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0026, 0xC006,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x0029, 0xFF0E,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0x002A, 0x3040,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_C,
RTW_INTF_PHY_PLATFORM_ALL},
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
static struct rtw_intf_phy_para_table phy_para_table_8822b = {
.usb2_para = usb2_param_8822b,
.usb3_para = usb3_param_8822b,
.gen1_para = pcie_gen1_param_8822b,
.gen2_para = pcie_gen2_param_8822b,
.n_usb2_para = ARRAY_SIZE(usb2_param_8822b),
.n_usb3_para = ARRAY_SIZE(usb3_param_8822b),
.n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8822b),
.n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8822b),
};
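/* The RFE (RF front-end) type read from efuse selects which power-by-rate
 * and TX power limit tables to load; types 2 and 5 map to the type2/type5
 * tables declared in rtw8822b_table.h.
 */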
static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
[2] = RTW_DEF_RFE(8822b, 2, 2),
[5] = RTW_DEF_RFE(8822b, 5, 5),
};
static struct rtw_hw_reg rtw8822b_dig[] = {
[0] = { .addr = 0xc50, .mask = 0x7f },
[1] = { .addr = 0xe50, .mask = 0x7f },
};
static struct rtw_page_table page_table_8822b[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
{64, 64, 0, 0, 1},
{64, 64, 64, 0, 1},
{64, 64, 64, 64, 1},
};
static struct rtw_rqpn rqpn_table_8822b[] = {
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
};
static struct rtw_chip_ops rtw8822b_ops = {
.phy_set_param = rtw8822b_phy_set_param,
.read_efuse = rtw8822b_read_efuse,
.query_rx_desc = rtw8822b_query_rx_desc,
.set_channel = rtw8822b_set_channel,
.mac_init = rtw8822b_mac_init,
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_sipi,
.set_tx_power_index = rtw8822b_set_tx_power_index,
.set_antenna = rtw8822b_set_antenna,
.cfg_ldo25 = rtw8822b_cfg_ldo25,
.false_alarm_statistics = rtw8822b_false_alarm_statistics,
.do_iqk = rtw8822b_do_iqk,
};
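/* Chip description consumed by the rtw88 core: it ties the 8822B ops
 * above to the firmware name, descriptor sizes, efuse/FIFO geometry,
 * power sequences, DMA page/queue tables and parameter tables.  The
 * symbol is exported so the bus-specific part of the driver can refer to
 * it from its device table.
 */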
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
.fw_name = "rtw88/rtw8822b_fw.bin",
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
.rx_buf_desc_sz = 8,
.phy_efuse_size = 1024,
.log_efuse_size = 768,
.ptct_efuse_size = 96,
.txff_size = 262144,
.rxff_size = 24576,
.txgi_factor = 1,
.is_pwr_by_rate_dec = true,
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = 128,
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
.sys_func_en = 0xDC,
.pwr_on_seq = card_enable_flow_8822b,
.pwr_off_seq = card_disable_flow_8822b,
.page_table = page_table_8822b,
.rqpn_table = rqpn_table_8822b,
.intf_table = &phy_para_table_8822b,
.dig = rtw8822b_dig,
.rf_base_addr = {0x2800, 0x2c00},
.rf_sipi_addr = {0xc90, 0xe90},
.mac_tbl = &rtw8822b_mac_tbl,
.agc_tbl = &rtw8822b_agc_tbl,
.bb_tbl = &rtw8822b_bb_tbl,
.rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl},
.rfe_defs = rtw8822b_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs),
};
EXPORT_SYMBOL(rtw8822b_hw_spec);
MODULE_FIRMWARE("rtw88/rtw8822b_fw.bin");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW8822B_H__
#define __RTW8822B_H__
#include <asm/byteorder.h>
#define RCR_VHT_ACK BIT(26)
struct rtw8822bu_efuse {
u8 res4[4]; /* 0xd0 */
u8 usb_optional_function;
u8 res5[0x1e];
u8 res6[2];
u8 serial[0x0b]; /* 0xf5 */
u8 vid; /* 0x100 */
u8 res7;
u8 pid;
u8 res8[4];
u8 mac_addr[ETH_ALEN]; /* 0x107 */
u8 res9[2];
u8 vendor_name[0x07];
u8 res10[2];
u8 device_name[0x14];
u8 res11[0xcf];
u8 package_type; /* 0x1fb */
u8 res12[0x4];
};
struct rtw8822be_efuse {
u8 mac_addr[ETH_ALEN]; /* 0xd0 */
u8 vender_id[2];
u8 device_id[2];
u8 sub_vender_id[2];
u8 sub_device_id[2];
u8 pmc[2];
u8 exp_device_cap[2];
u8 msi_cap;
u8 ltr_cap; /* 0xe3 */
u8 exp_link_control[2];
u8 link_cap[4];
u8 link_control[2];
u8 serial_number[8];
u8 res0:2; /* 0xf4 */
u8 ltr_en:1;
u8 res1:2;
u8 obff:2;
u8 res2:3;
u8 obff_cap:2;
u8 res3:4;
u8 res4[3];
u8 class_code[3];
u8 pci_pm_L1_2_supp:1;
u8 pci_pm_L1_1_supp:1;
u8 aspm_pm_L1_2_supp:1;
u8 aspm_pm_L1_1_supp:1;
u8 L1_pm_substates_supp:1;
u8 res5:3;
u8 port_common_mode_restore_time;
u8 port_t_power_on_scale:2;
u8 res6:1;
u8 port_t_power_on_value:5;
u8 res7;
};
struct rtw8822b_efuse {
__le16 rtl_id;
u8 res0[0x0e];
/* power index for four RF paths */
struct rtw_txpwr_idx txpwr_idx_table[4];
u8 channel_plan; /* 0xb8 */
u8 xtal_k;
u8 thermal_meter;
u8 iqk_lck;
u8 pa_type; /* 0xbc */
u8 lna_type_2g[2]; /* 0xbd */
u8 lna_type_5g[2];
u8 rf_board_option;
u8 rf_feature_option;
u8 rf_bt_setting;
u8 eeprom_version;
u8 eeprom_customer_id;
u8 tx_bb_swing_setting_2g;
u8 tx_bb_swing_setting_5g;
u8 tx_pwr_calibrate_rate;
u8 rf_antenna_option; /* 0xc9 */
u8 rfe_option;
u8 country_code[2];
u8 res[3];
union {
struct rtw8822bu_efuse u;
struct rtw8822be_efuse e;
};
};
static inline void
_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
{
/* 0xC00-0xCFF and 0xE00-0xEFF have the same layout */
rtw_write32_mask(rtwdev, addr, mask, data);
rtw_write32_mask(rtwdev, addr + 0x200, mask, data);
}
#define rtw_write32s_mask(rtwdev, addr, mask, data) \
do { \
BUILD_BUG_ON((addr) < 0xC00 || (addr) >= 0xD00); \
\
_rtw_write32s_mask(rtwdev, addr, mask, data); \
} while (0)
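/* For example, rtw_write32s_mask(rtwdev, REG_RXIGI_A, 0x7f, val) updates
 * the IGI field at 0xc50 (path A) and its mirror at 0xe50 (path B) in one
 * call; the BUILD_BUG_ON rejects addresses outside the 0xC00-0xCFF block.
 */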
/* phy status page0 */
#define GET_PHY_STAT_P0_PWDB(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
/* phy status page1 */
#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
#define GET_PHY_STAT_P1_RF_MODE(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(29, 28))
#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
#define REG_HTSTFWT 0x800
#define REG_RXPSEL 0x808
#define BIT_RX_PSEL_RST (BIT(28) | BIT(29))
#define REG_TXPSEL 0x80c
#define REG_RXCCAMSK 0x814
#define REG_CCASEL 0x82c
#define REG_PDMFTH 0x830
#define REG_CCA2ND 0x838
#define REG_L1WT 0x83c
#define REG_L1PKWT 0x840
#define REG_MRC 0x850
#define REG_CLKTRK 0x860
#define REG_ADCCLK 0x8ac
#define REG_ADC160 0x8c4
#define REG_ADC40 0x8c8
#define REG_CDDTXP 0x93c
#define REG_TXPSEL1 0x940
#define REG_ACBB0 0x948
#define REG_ACBBRXFIR 0x94c
#define REG_ACGG2TBL 0x958
#define REG_RXSB 0xa00
#define REG_ADCINI 0xa04
#define REG_TXSF2 0xa24
#define REG_TXSF6 0xa28
#define REG_RXDESC 0xa2c
#define REG_ENTXCCK 0xa80
#define REG_AGCTR_A 0xc08
#define REG_TXDFIR 0xc20
#define REG_RXIGI_A 0xc50
#define REG_TRSW 0xca0
#define REG_RFESEL0 0xcb0
#define REG_RFESEL8 0xcb4
#define REG_RFECTL 0xcb8
#define REG_RFEINV 0xcbc
#define REG_AGCTR_B 0xe08
#define REG_RXIGI_B 0xe50
#define REG_ANTWT 0x1904
#define REG_IQKFAILMSK 0x1bf0
#endif
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW8822B_TABLE_H__
#define __RTW8822B_TABLE_H__
extern const struct rtw_table rtw8822b_mac_tbl;
extern const struct rtw_table rtw8822b_agc_tbl;
extern const struct rtw_table rtw8822b_bb_tbl;
extern const struct rtw_table rtw8822b_bb_pg_type2_tbl;
extern const struct rtw_table rtw8822b_bb_pg_type5_tbl;
extern const struct rtw_table rtw8822b_rf_a_tbl;
extern const struct rtw_table rtw8822b_rf_b_tbl;
extern const struct rtw_table rtw8822b_txpwr_lmt_type2_tbl;
extern const struct rtw_table rtw8822b_txpwr_lmt_type5_tbl;
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "fw.h"
#include "tx.h"
#include "rx.h"
#include "phy.h"
#include "rtw8822c.h"
#include "rtw8822c_table.h"
#include "mac.h"
#include "reg.h"
#include "debug.h"
static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
static void rtw8822ce_efuse_parsing(struct rtw_efuse *efuse,
struct rtw8822c_efuse *map)
{
ether_addr_copy(efuse->addr, map->e.mac_addr);
}
static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw8822c_efuse *map;
int i;
map = (struct rtw8822c_efuse *)log_map;
efuse->rfe_option = map->rfe_option;
efuse->crystal_cap = map->xtal_k;
efuse->channel_plan = map->channel_plan;
efuse->country_code[0] = map->country_code[0];
efuse->country_code[1] = map->country_code[1];
efuse->bt_setting = map->rf_bt_setting;
efuse->regd = map->rf_board_option & 0x7;
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtw8822ce_efuse_parsing(efuse, map);
break;
default:
/* unsupported now */
return -ENOTSUPP;
}
return 0;
}
static void rtw8822c_header_file_init(struct rtw_dev *rtwdev, bool pre)
{
rtw_write32_set(rtwdev, REG_3WIRE, BIT_3WIRE_TX_EN | BIT_3WIRE_RX_EN);
rtw_write32_set(rtwdev, REG_3WIRE, BIT_3WIRE_PI_ON);
rtw_write32_set(rtwdev, REG_3WIRE2, BIT_3WIRE_TX_EN | BIT_3WIRE_RX_EN);
rtw_write32_set(rtwdev, REG_3WIRE2, BIT_3WIRE_PI_ON);
if (pre)
rtw_write32_clr(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
else
rtw_write32_set(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
}
static void rtw8822c_dac_backup_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *backup,
struct rtw_backup_info *backup_rf)
{
u32 path, i;
u32 val;
u32 reg;
u32 rf_addr[DACK_RF_8822C] = {0x8f};
u32 addrs[DACK_REG_8822C] = {0x180c, 0x1810, 0x410c, 0x4110,
0x1c3c, 0x1c24, 0x1d70, 0x9b4,
0x1a00, 0x1a14, 0x1d58, 0x1c38,
0x1e24, 0x1e28, 0x1860, 0x4160};
for (i = 0; i < DACK_REG_8822C; i++) {
backup[i].len = 4;
backup[i].reg = addrs[i];
backup[i].val = rtw_read32(rtwdev, addrs[i]);
}
for (path = 0; path < DACK_PATH_8822C; path++) {
for (i = 0; i < DACK_RF_8822C; i++) {
reg = rf_addr[i];
val = rtw_read_rf(rtwdev, path, reg, RFREG_MASK);
backup_rf[path * DACK_RF_8822C + i].reg = reg;
backup_rf[path * DACK_RF_8822C + i].val = val;
}
}
}
static void rtw8822c_dac_restore_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *backup,
struct rtw_backup_info *backup_rf)
{
u32 path, i;
u32 val;
u32 reg;
rtw_restore_reg(rtwdev, backup, DACK_REG_8822C);
for (path = 0; path < DACK_PATH_8822C; path++) {
for (i = 0; i < DACK_RF_8822C; i++) {
val = backup_rf[path * DACK_RF_8822C + i].val;
reg = backup_rf[path * DACK_RF_8822C + i].reg;
rtw_write_rf(rtwdev, path, reg, RFREG_MASK, val);
}
}
}
static void rtw8822c_rf_minmax_cmp(struct rtw_dev *rtwdev, u32 value,
u32 *min, u32 *max)
{
if (value >= 0x200) {
if (*min >= 0x200) {
if (*min > value)
*min = value;
} else {
*min = value;
}
if (*max >= 0x200) {
if (*max < value)
*max = value;
}
} else {
if (*min < 0x200) {
if (*min > value)
*min = value;
}
if (*max >= 0x200) {
*max = value;
} else {
if (*max < value)
*max = value;
}
}
}
static void swap_u32(u32 *v1, u32 *v2)
{
u32 tmp;
tmp = *v1;
*v1 = *v2;
*v2 = tmp;
}
static void __rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *v1, u32 *v2)
{
if (*v1 >= 0x200 && *v2 >= 0x200) {
if (*v1 > *v2)
swap_u32(v1, v2);
} else if (*v1 < 0x200 && *v2 < 0x200) {
if (*v1 > *v2)
swap_u32(v1, v2);
} else if (*v1 < 0x200 && *v2 >= 0x200) {
swap_u32(v1, v2);
}
}
static void rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *iv, u32 *qv)
{
u32 i, j;
for (i = 0; i < DACK_SN_8822C - 1; i++) {
for (j = 0; j < (DACK_SN_8822C - 1 - i) ; j++) {
__rtw8822c_dac_iq_sort(rtwdev, &iv[j], &iv[j + 1]);
__rtw8822c_dac_iq_sort(rtwdev, &qv[j], &qv[j + 1]);
}
}
}
static void rtw8822c_dac_iq_offset(struct rtw_dev *rtwdev, u32 *vec, u32 *val)
{
u32 p, m, t, i;
m = 0;
p = 0;
for (i = 10; i < DACK_SN_8822C - 10; i++) {
if (vec[i] > 0x200)
m = (0x400 - vec[i]) + m;
else
p = vec[i] + p;
}
if (p > m) {
t = p - m;
t = t / (DACK_SN_8822C - 20);
} else {
t = m - p;
t = t / (DACK_SN_8822C - 20);
if (t != 0x0)
t = 0x400 - t;
}
*val = t;
}
static u32 rtw8822c_get_path_base_addr(u8 path)
{
u32 base_addr;
switch (path) {
case RF_PATH_A:
base_addr = 0x1800;
break;
case RF_PATH_B:
base_addr = 0x4100;
break;
default:
WARN_ON(1);
return -1;
}
return base_addr;
}
static bool rtw8822c_dac_iq_check(struct rtw_dev *rtwdev, u32 value)
{
bool ret = true;
if ((value >= 0x200 && (0x400 - value) > 0x64) ||
(value < 0x200 && value > 0x64)) {
ret = false;
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] Error overflow\n");
}
return ret;
}
static void rtw8822c_dac_cal_iq_sample(struct rtw_dev *rtwdev, u32 *iv, u32 *qv)
{
u32 temp;
int i = 0, cnt = 0;
while (i < DACK_SN_8822C && cnt < 10000) {
cnt++;
temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
iv[i] = (temp & 0x3ff000) >> 12;
qv[i] = temp & 0x3ff;
if (rtw8822c_dac_iq_check(rtwdev, iv[i]) &&
rtw8822c_dac_iq_check(rtwdev, qv[i]))
i++;
}
}
static void rtw8822c_dac_cal_iq_search(struct rtw_dev *rtwdev,
u32 *iv, u32 *qv,
u32 *i_value, u32 *q_value)
{
u32 i_max = 0, q_max = 0, i_min = 0, q_min = 0;
u32 i_delta, q_delta;
u32 temp;
int i, cnt = 0;
do {
i_min = iv[0];
i_max = iv[0];
q_min = qv[0];
q_max = qv[0];
for (i = 0; i < DACK_SN_8822C; i++) {
rtw8822c_rf_minmax_cmp(rtwdev, iv[i], &i_min, &i_max);
rtw8822c_rf_minmax_cmp(rtwdev, qv[i], &q_min, &q_max);
}
if (i_max < 0x200 && i_min < 0x200)
i_delta = i_max - i_min;
else if (i_max >= 0x200 && i_min >= 0x200)
i_delta = i_max - i_min;
else
i_delta = i_max + (0x400 - i_min);
if (q_max < 0x200 && q_min < 0x200)
q_delta = q_max - q_min;
else if (q_max >= 0x200 && q_min >= 0x200)
q_delta = q_max - q_min;
else
q_delta = q_max + (0x400 - q_min);
rtw_dbg(rtwdev, RTW_DBG_RFK,
"[DACK] i: min=0x%08x, max=0x%08x, delta=0x%08x\n",
i_min, i_max, i_delta);
rtw_dbg(rtwdev, RTW_DBG_RFK,
"[DACK] q: min=0x%08x, max=0x%08x, delta=0x%08x\n",
q_min, q_max, q_delta);
rtw8822c_dac_iq_sort(rtwdev, iv, qv);
if (i_delta > 5 || q_delta > 5) {
temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
iv[0] = (temp & 0x3ff000) >> 12;
qv[0] = temp & 0x3ff;
temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
iv[DACK_SN_8822C - 1] = (temp & 0x3ff000) >> 12;
qv[DACK_SN_8822C - 1] = temp & 0x3ff;
} else {
break;
}
} while (cnt++ < 100);
rtw8822c_dac_iq_offset(rtwdev, iv, i_value);
rtw8822c_dac_iq_offset(rtwdev, qv, q_value);
}
static void rtw8822c_dac_cal_rf_mode(struct rtw_dev *rtwdev,
u32 *i_value, u32 *q_value)
{
u32 iv[DACK_SN_8822C], qv[DACK_SN_8822C];
u32 rf_a, rf_b;
mdelay(10);
rf_a = rtw_read_rf(rtwdev, RF_PATH_A, 0x0, RFREG_MASK);
rf_b = rtw_read_rf(rtwdev, RF_PATH_B, 0x0, RFREG_MASK);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] RF path-A=0x%05x\n", rf_a);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] RF path-B=0x%05x\n", rf_b);
rtw8822c_dac_cal_iq_sample(rtwdev, iv, qv);
rtw8822c_dac_cal_iq_search(rtwdev, iv, qv, i_value, q_value);
}
static void rtw8822c_dac_bb_setting(struct rtw_dev *rtwdev)
{
rtw_write32_mask(rtwdev, 0x1d58, 0xff8, 0x1ff);
rtw_write32_mask(rtwdev, 0x1a00, 0x3, 0x2);
rtw_write32_mask(rtwdev, 0x1a14, 0x300, 0x3);
rtw_write32(rtwdev, 0x1d70, 0x7e7e7e7e);
rtw_write32_mask(rtwdev, 0x180c, 0x3, 0x0);
rtw_write32_mask(rtwdev, 0x410c, 0x3, 0x0);
rtw_write32(rtwdev, 0x1b00, 0x00000008);
rtw_write8(rtwdev, 0x1bcc, 0x3f);
rtw_write32(rtwdev, 0x1b00, 0x0000000a);
rtw_write8(rtwdev, 0x1bcc, 0x3f);
rtw_write32_mask(rtwdev, 0x1e24, BIT(31), 0x0);
rtw_write32_mask(rtwdev, 0x1e28, 0xf, 0x3);
}
static void rtw8822c_dac_cal_adc(struct rtw_dev *rtwdev,
u8 path, u32 *adc_ic, u32 *adc_qc)
{
u32 ic = 0, qc = 0, temp = 0;
u32 base_addr;
u32 path_sel;
int i;
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK path(%d)\n", path);
base_addr = rtw8822c_get_path_base_addr(path);
switch (path) {
case RF_PATH_A:
path_sel = 0xa0000;
break;
case RF_PATH_B:
path_sel = 0x80000;
break;
default:
WARN_ON(1);
return;
}
/* ADCK step1 */
rtw_write32_mask(rtwdev, base_addr + 0x30, BIT(30), 0x0);
if (path == RF_PATH_B)
rtw_write32(rtwdev, base_addr + 0x30, 0x30db8041);
rtw_write32(rtwdev, base_addr + 0x60, 0xf0040ff0);
rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
rtw_write32(rtwdev, base_addr + 0x10, 0x02dd08c4);
rtw_write32(rtwdev, base_addr + 0x0c, 0x10000260);
rtw_write_rf(rtwdev, RF_PATH_A, 0x0, RFREG_MASK, 0x10000);
rtw_write_rf(rtwdev, RF_PATH_B, 0x0, RFREG_MASK, 0x10000);
for (i = 0; i < 10; i++) {
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK count=%d\n", i);
rtw_write32(rtwdev, 0x1c3c, path_sel + 0x8003);
rtw_write32(rtwdev, 0x1c24, 0x00010002);
rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
rtw_dbg(rtwdev, RTW_DBG_RFK,
"[DACK] before: i=0x%x, q=0x%x\n", ic, qc);
/* compensation value */
if (ic != 0x0) {
ic = 0x400 - ic;
*adc_ic = ic;
}
if (qc != 0x0) {
qc = 0x400 - qc;
*adc_qc = qc;
}
temp = (ic & 0x3ff) | ((qc & 0x3ff) << 10);
rtw_write32(rtwdev, base_addr + 0x68, temp);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK 0x%08x=0x08%x\n",
base_addr + 0x68, temp);
/* check ADC DC offset */
rtw_write32(rtwdev, 0x1c3c, path_sel + 0x8103);
rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
rtw_dbg(rtwdev, RTW_DBG_RFK,
"[DACK] after: i=0x%08x, q=0x%08x\n", ic, qc);
if (ic >= 0x200)
ic = 0x400 - ic;
if (qc >= 0x200)
qc = 0x400 - qc;
if (ic < 5 && qc < 5)
break;
}
/* ADCK step2 */
rtw_write32(rtwdev, 0x1c3c, 0x00000003);
rtw_write32(rtwdev, base_addr + 0x0c, 0x10000260);
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c4);
/* release pull low switch on IQ path */
rtw_write_rf(rtwdev, path, 0x8f, BIT(13), 0x1);
}
static void rtw8822c_dac_cal_step1(struct rtw_dev *rtwdev, u8 path)
{
u32 base_addr;
base_addr = rtw8822c_get_path_base_addr(path);
rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
if (path == RF_PATH_A) {
rtw_write32(rtwdev, base_addr + 0x60, 0xf0040ff0);
rtw_write32(rtwdev, 0x1c38, 0xffffffff);
}
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
rtw_write32(rtwdev, 0x9b4, 0xdb66db00);
rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb88);
rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff81);
rtw_write32(rtwdev, base_addr + 0xc0, 0x0003d208);
rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb88);
rtw_write32(rtwdev, base_addr + 0xd8, 0x0008ff81);
rtw_write32(rtwdev, base_addr + 0xdc, 0x0003d208);
rtw_write32(rtwdev, base_addr + 0xb8, 0x60000000);
mdelay(2);
rtw_write32(rtwdev, base_addr + 0xbc, 0x000aff8d);
mdelay(2);
rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb89);
rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb89);
mdelay(1);
rtw_write32(rtwdev, base_addr + 0xb8, 0x62000000);
mdelay(20);
rtw_write32(rtwdev, base_addr + 0xd4, 0x62000000);
mdelay(20);
rtw_write32(rtwdev, base_addr + 0xb8, 0x02000000);
mdelay(20);
rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff87);
rtw_write32(rtwdev, 0x9b4, 0xdb6db600);
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff87);
rtw_write32(rtwdev, base_addr + 0x60, 0xf0000000);
}
static void rtw8822c_dac_cal_step2(struct rtw_dev *rtwdev,
u8 path, u32 *ic_out, u32 *qc_out)
{
u32 base_addr;
u32 ic, qc, ic_in, qc_in;
base_addr = rtw8822c_get_path_base_addr(path);
rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xf0000000, 0x0);
rtw_write32_mask(rtwdev, base_addr + 0xc0, 0xf, 0x8);
rtw_write32_mask(rtwdev, base_addr + 0xd8, 0xf0000000, 0x0);
rtw_write32_mask(rtwdev, base_addr + 0xdc, 0xf, 0x8);
rtw_write32(rtwdev, 0x1b00, 0x00000008);
rtw_write8(rtwdev, 0x1bcc, 0x03f);
rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
rtw_write32(rtwdev, 0x1c3c, 0x00088103);
rtw8822c_dac_cal_rf_mode(rtwdev, &ic_in, &qc_in);
ic = ic_in;
qc = qc_in;
/* compensation value */
if (ic != 0x0)
ic = 0x400 - ic;
if (qc != 0x0)
qc = 0x400 - qc;
if (ic < 0x300) {
ic = ic * 2 * 6 / 5;
ic = ic + 0x80;
} else {
ic = (0x400 - ic) * 2 * 6 / 5;
ic = 0x7f - ic;
}
if (qc < 0x300) {
qc = qc * 2 * 6 / 5;
qc = qc + 0x80;
} else {
qc = (0x400 - qc) * 2 * 6 / 5;
qc = 0x7f - qc;
}
*ic_out = ic;
*qc_out = qc;
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] before i=0x%x, q=0x%x\n", ic_in, qc_in);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] after i=0x%x, q=0x%x\n", ic, qc);
}
static void rtw8822c_dac_cal_step3(struct rtw_dev *rtwdev, u8 path,
u32 adc_ic, u32 adc_qc,
u32 *ic_in, u32 *qc_in,
u32 *i_out, u32 *q_out)
{
u32 base_addr;
u32 ic, qc;
u32 temp;
base_addr = rtw8822c_get_path_base_addr(path);
ic = *ic_in;
qc = *qc_in;
rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
rtw_write32(rtwdev, 0x9b4, 0xdb66db00);
rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb88);
rtw_write32(rtwdev, base_addr + 0xbc, 0xc008ff81);
rtw_write32(rtwdev, base_addr + 0xc0, 0x0003d208);
rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xf0000000, ic & 0xf);
rtw_write32_mask(rtwdev, base_addr + 0xc0, 0xf, (ic & 0xf0) >> 4);
rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb88);
rtw_write32(rtwdev, base_addr + 0xd8, 0xe008ff81);
rtw_write32(rtwdev, base_addr + 0xdc, 0x0003d208);
rtw_write32_mask(rtwdev, base_addr + 0xd8, 0xf0000000, qc & 0xf);
rtw_write32_mask(rtwdev, base_addr + 0xdc, 0xf, (qc & 0xf0) >> 4);
rtw_write32(rtwdev, base_addr + 0xb8, 0x60000000);
mdelay(2);
rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xe, 0x6);
mdelay(2);
rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb89);
rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb89);
mdelay(1);
rtw_write32(rtwdev, base_addr + 0xb8, 0x62000000);
mdelay(20);
rtw_write32(rtwdev, base_addr + 0xd4, 0x62000000);
mdelay(20);
rtw_write32(rtwdev, base_addr + 0xb8, 0x02000000);
mdelay(20);
rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xe, 0x3);
rtw_write32(rtwdev, 0x9b4, 0xdb6db600);
/* check DAC DC offset */
temp = ((adc_ic + 0x10) & 0x3ff) | (((adc_qc + 0x10) & 0x3ff) << 10);
rtw_write32(rtwdev, base_addr + 0x68, temp);
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
rtw_write32(rtwdev, base_addr + 0x60, 0xf0000000);
rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
if (ic >= 0x10)
ic = ic - 0x10;
else
ic = 0x400 - (0x10 - ic);
if (qc >= 0x10)
qc = qc - 0x10;
else
qc = 0x400 - (0x10 - qc);
*i_out = ic;
*q_out = qc;
if (ic >= 0x200)
ic = 0x400 - ic;
if (qc >= 0x200)
qc = 0x400 - qc;
*ic_in = ic;
*qc_in = qc;
rtw_dbg(rtwdev, RTW_DBG_RFK,
"[DACK] after DACK i=0x%x, q=0x%x\n", *i_out, *q_out);
}
static void rtw8822c_dac_cal_step4(struct rtw_dev *rtwdev, u8 path)
{
u32 base_addr = rtw8822c_get_path_base_addr(path);
rtw_write32(rtwdev, base_addr + 0x68, 0x0);
rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c4);
rtw_write32_mask(rtwdev, base_addr + 0xbc, 0x1, 0x0);
rtw_write32_mask(rtwdev, base_addr + 0x30, BIT(30), 0x1);
}
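/* DAC calibration (DACK) flow: back up the affected BB/RF registers,
 * apply the calibration BB settings, and for each RF path run the ADC
 * offset calibration followed by up to 10 iterations of step1..step3
 * until the measured I/Q DC offset drops below 5; step4 then returns the
 * path to normal operation and the backed-up registers are restored.
 */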
static void rtw8822c_rf_dac_cal(struct rtw_dev *rtwdev)
{
struct rtw_backup_info backup_rf[DACK_RF_8822C * DACK_PATH_8822C];
struct rtw_backup_info backup[DACK_REG_8822C];
u32 ic = 0, qc = 0, i;
u32 i_a = 0x0, q_a = 0x0, i_b = 0x0, q_b = 0x0;
u32 ic_a = 0x0, qc_a = 0x0, ic_b = 0x0, qc_b = 0x0;
u32 adc_ic_a = 0x0, adc_qc_a = 0x0, adc_ic_b = 0x0, adc_qc_b = 0x0;
rtw8822c_dac_backup_reg(rtwdev, backup, backup_rf);
rtw8822c_dac_bb_setting(rtwdev);
/* path-A */
rtw8822c_dac_cal_adc(rtwdev, RF_PATH_A, &adc_ic_a, &adc_qc_a);
for (i = 0; i < 10; i++) {
rtw8822c_dac_cal_step1(rtwdev, RF_PATH_A);
rtw8822c_dac_cal_step2(rtwdev, RF_PATH_A, &ic, &qc);
ic_a = ic;
qc_a = qc;
rtw8822c_dac_cal_step3(rtwdev, RF_PATH_A, adc_ic_a, adc_qc_a,
&ic, &qc, &i_a, &q_a);
if (ic < 5 && qc < 5)
break;
}
rtw8822c_dac_cal_step4(rtwdev, RF_PATH_A);
/* path-B */
rtw8822c_dac_cal_adc(rtwdev, RF_PATH_B, &adc_ic_b, &adc_qc_b);
for (i = 0; i < 10; i++) {
rtw8822c_dac_cal_step1(rtwdev, RF_PATH_B);
rtw8822c_dac_cal_step2(rtwdev, RF_PATH_B, &ic, &qc);
ic_b = ic;
qc_b = qc;
rtw8822c_dac_cal_step3(rtwdev, RF_PATH_B, adc_ic_b, adc_qc_b,
&ic, &qc, &i_b, &q_b);
if (ic < 5 && qc < 5)
break;
}
rtw8822c_dac_cal_step4(rtwdev, RF_PATH_B);
rtw_write32(rtwdev, 0x1b00, 0x00000008);
rtw_write32_mask(rtwdev, 0x4130, BIT(30), 0x1);
rtw_write8(rtwdev, 0x1bcc, 0x0);
rtw_write32(rtwdev, 0x1b00, 0x0000000a);
rtw_write8(rtwdev, 0x1bcc, 0x0);
rtw8822c_dac_restore_reg(rtwdev, backup, backup_rf);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path A: ic=0x%x, qc=0x%x\n", ic_a, qc_a);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path B: ic=0x%x, qc=0x%x\n", ic_b, qc_b);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path A: i=0x%x, q=0x%x\n", i_a, q_a);
rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path B: i=0x%x, q=0x%x\n", i_b, q_b);
}
static void rtw8822c_rf_x2_check(struct rtw_dev *rtwdev)
{
u8 x2k_busy;
mdelay(1);
x2k_busy = rtw_read_rf(rtwdev, RF_PATH_A, 0xb8, BIT(15));
if (x2k_busy == 1) {
rtw_write_rf(rtwdev, RF_PATH_A, 0xb8, RFREG_MASK, 0xC4440);
rtw_write_rf(rtwdev, RF_PATH_A, 0xba, RFREG_MASK, 0x6840D);
rtw_write_rf(rtwdev, RF_PATH_A, 0xb8, RFREG_MASK, 0x80440);
mdelay(1);
}
}
static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
{
rtw8822c_rf_dac_cal(rtwdev);
rtw8822c_rf_x2_check(rtwdev);
}
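/* PHY bring-up for 8822C: power on the BB/RF blocks, run the pre/post
 * header-file init around loading the parameter tables, program the
 * crystal cap from efuse, configure the default TRX paths, cache the CCK
 * gain bounds used later for RSSI correction, run the RF calibrations and
 * finally set up the WiFi path controller.
 */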
static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct rtw_hal *hal = &rtwdev->hal;
u8 crystal_cap;
u8 cck_gi_u_bnd_msb = 0;
u8 cck_gi_u_bnd_lsb = 0;
u8 cck_gi_l_bnd_msb = 0;
u8 cck_gi_l_bnd_lsb = 0;
bool is_tx2_path;
/* power on BB/RF domain */
rtw_write8_set(rtwdev, REG_SYS_FUNC_EN,
BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
rtw_write8_set(rtwdev, REG_RF_CTRL,
BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
rtw_write32_set(rtwdev, REG_WLRF1, BIT_WLRF1_BBRF_EN);
/* pre init before header files config */
rtw8822c_header_file_init(rtwdev, true);
rtw_phy_load_tables(rtwdev);
crystal_cap = rtwdev->efuse.crystal_cap & 0x7f;
rtw_write32_mask(rtwdev, REG_ANAPAR_XTAL_0, 0xfffc00,
crystal_cap | (crystal_cap << 7));
/* post init after header files config */
rtw8822c_header_file_init(rtwdev, false);
is_tx2_path = false;
rtw8822c_config_trx_mode(rtwdev, hal->antenna_tx, hal->antenna_rx,
is_tx2_path);
rtw_phy_init(rtwdev);
cck_gi_u_bnd_msb = (u8)rtw_read32_mask(rtwdev, 0x1a98, 0xc000);
cck_gi_u_bnd_lsb = (u8)rtw_read32_mask(rtwdev, 0x1aa8, 0xf0000);
cck_gi_l_bnd_msb = (u8)rtw_read32_mask(rtwdev, 0x1a98, 0xc0);
cck_gi_l_bnd_lsb = (u8)rtw_read32_mask(rtwdev, 0x1a70, 0x0f000000);
dm_info->cck_gi_u_bnd = ((cck_gi_u_bnd_msb << 4) | (cck_gi_u_bnd_lsb));
dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb));
rtw8822c_rf_init(rtwdev);
/* wifi path controller */
rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
rtw_write32_mask(rtwdev, 0x1704, 0xffffffff, 0x7700);
rtw_write32_mask(rtwdev, 0x1700, 0xffffffff, 0xc00f0038);
rtw_write32_mask(rtwdev, 0x6c0, 0xffffffff, 0xaaaaaaaa);
rtw_write32_mask(rtwdev, 0x6c4, 0xffffffff, 0xaaaaaaaa);
}
#define WLAN_TXQ_RPT_EN 0x1F
#define WLAN_SLOT_TIME 0x09
#define WLAN_PIFS_TIME 0x1C
#define WLAN_SIFS_CCK_CONT_TX 0x0A
#define WLAN_SIFS_OFDM_CONT_TX 0x0E
#define WLAN_SIFS_CCK_TRX 0x0A
#define WLAN_SIFS_OFDM_TRX 0x10
#define WLAN_NAV_MAX 0xC8
#define WLAN_RDG_NAV 0x05
#define WLAN_TXOP_NAV 0x1B
#define WLAN_CCK_RX_TSF 0x30
#define WLAN_OFDM_RX_TSF 0x30
#define WLAN_TBTT_PROHIBIT 0x04 /* unit : 32us */
#define WLAN_TBTT_HOLD_TIME 0x064 /* unit : 32us */
#define WLAN_DRV_EARLY_INT 0x04
#define WLAN_BCN_CTRL_CLT0 0x10
#define WLAN_BCN_DMA_TIME 0x02
#define WLAN_BCN_MAX_ERR 0xFF
#define WLAN_SIFS_CCK_DUR_TUNE 0x0A
#define WLAN_SIFS_OFDM_DUR_TUNE 0x10
#define WLAN_SIFS_CCK_CTX 0x0A
#define WLAN_SIFS_CCK_IRX 0x0A
#define WLAN_SIFS_OFDM_CTX 0x0E
#define WLAN_SIFS_OFDM_IRX 0x0E
#define WLAN_EIFS_DUR_TUNE 0x40
#define WLAN_EDCA_VO_PARAM 0x002FA226
#define WLAN_EDCA_VI_PARAM 0x005EA328
#define WLAN_EDCA_BE_PARAM 0x005EA42B
#define WLAN_EDCA_BK_PARAM 0x0000A44F
#define WLAN_RX_FILTER0 0xFFFFFFFF
#define WLAN_RX_FILTER2 0xFFFF
#define WLAN_RCR_CFG 0xE400220E
#define WLAN_RXPKT_MAX_SZ 12288
#define WLAN_RXPKT_MAX_SZ_512 (WLAN_RXPKT_MAX_SZ >> 9)
#define WLAN_AMPDU_MAX_TIME 0x70
#define WLAN_RTS_LEN_TH 0xFF
#define WLAN_RTS_TX_TIME_TH 0x08
#define WLAN_MAX_AGG_PKT_LIMIT 0x20
#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
#define WLAN_PRE_TXCNT_TIME_TH 0x1E0
#define FAST_EDCA_VO_TH 0x06
#define FAST_EDCA_VI_TH 0x06
#define FAST_EDCA_BE_TH 0x06
#define FAST_EDCA_BK_TH 0x06
#define WLAN_BAR_RETRY_LIMIT 0x01
#define WLAN_BAR_ACK_TYPE 0x05
#define WLAN_RA_TRY_RATE_AGG_LIMIT 0x08
#define WLAN_RESP_TXRATE 0x84
#define WLAN_ACK_TO 0x21
#define WLAN_ACK_TO_CCK 0x6A
#define WLAN_DATA_RATE_FB_CNT_1_4 0x01000000
#define WLAN_DATA_RATE_FB_CNT_5_8 0x08070504
#define WLAN_RTS_RATE_FB_CNT_5_8 0x08070504
#define WLAN_DATA_RATE_FB_RATE0 0xFE01F010
#define WLAN_DATA_RATE_FB_RATE0_H 0x40000000
#define WLAN_RTS_RATE_FB_RATE1 0x003FF010
#define WLAN_RTS_RATE_FB_RATE1_H 0x40000000
#define WLAN_RTS_RATE_FB_RATE4 0x0600F010
#define WLAN_RTS_RATE_FB_RATE4_H 0x400003E0
#define WLAN_RTS_RATE_FB_RATE5 0x0600F015
#define WLAN_RTS_RATE_FB_RATE5_H 0x000000E0
#define WLAN_TX_FUNC_CFG1 0x30
#define WLAN_TX_FUNC_CFG2 0x30
#define WLAN_MAC_OPT_NORM_FUNC1 0x98
#define WLAN_MAC_OPT_LB_FUNC1 0x80
#define WLAN_MAC_OPT_FUNC2 0x30810041
#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
(WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
(WLAN_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX) | \
(WLAN_SIFS_OFDM_TRX << BIT_SHIFT_SIFS_OFDM_TRX))
#define WLAN_SIFS_DUR_TUNE (WLAN_SIFS_CCK_DUR_TUNE | \
(WLAN_SIFS_OFDM_DUR_TUNE << 8))
#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\
(WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP))
#define WLAN_NAV_CFG (WLAN_RDG_NAV | (WLAN_TXOP_NAV << 16))
#define WLAN_RX_TSF_CFG (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF) << 8)
#define MAC_CLK_SPEED 80 /* 80M */
#define EFUSE_PCB_INFO_OFFSET 0xCA
static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
{
u8 value8;
u16 value16;
u32 value32;
u16 pre_txcnt;
/* txq control */
value8 = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL);
value8 |= (BIT(7) & ~BIT(1) & ~BIT(2));
rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL, value8);
rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
/* sifs control */
rtw_write16(rtwdev, REG_SPEC_SIFS, WLAN_SIFS_DUR_TUNE);
rtw_write32(rtwdev, REG_SIFS, WLAN_SIFS_CFG);
rtw_write16(rtwdev, REG_RESP_SIFS_CCK,
WLAN_SIFS_CCK_CTX | WLAN_SIFS_CCK_IRX << 8);
rtw_write16(rtwdev, REG_RESP_SIFS_OFDM,
WLAN_SIFS_OFDM_CTX | WLAN_SIFS_OFDM_IRX << 8);
/* rate fallback control */
rtw_write32(rtwdev, REG_DARFRC, WLAN_DATA_RATE_FB_CNT_1_4);
rtw_write32(rtwdev, REG_DARFRCH, WLAN_DATA_RATE_FB_CNT_5_8);
rtw_write32(rtwdev, REG_RARFRCH, WLAN_RTS_RATE_FB_CNT_5_8);
rtw_write32(rtwdev, REG_ARFR0, WLAN_DATA_RATE_FB_RATE0);
rtw_write32(rtwdev, REG_ARFRH0, WLAN_DATA_RATE_FB_RATE0_H);
rtw_write32(rtwdev, REG_ARFR1_V1, WLAN_RTS_RATE_FB_RATE1);
rtw_write32(rtwdev, REG_ARFRH1_V1, WLAN_RTS_RATE_FB_RATE1_H);
rtw_write32(rtwdev, REG_ARFR4, WLAN_RTS_RATE_FB_RATE4);
rtw_write32(rtwdev, REG_ARFRH4, WLAN_RTS_RATE_FB_RATE4_H);
rtw_write32(rtwdev, REG_ARFR5, WLAN_RTS_RATE_FB_RATE5);
rtw_write32(rtwdev, REG_ARFRH5, WLAN_RTS_RATE_FB_RATE5_H);
/* protocol configuration */
rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, WLAN_AMPDU_MAX_TIME);
rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_EOF_V1);
pre_txcnt = WLAN_PRE_TXCNT_TIME_TH | BIT_EN_PRECNT;
rtw_write8(rtwdev, REG_PRECNT_CTRL, (u8)(pre_txcnt & 0xFF));
rtw_write8(rtwdev, REG_PRECNT_CTRL + 1, (u8)(pre_txcnt >> 8));
value32 = WLAN_RTS_LEN_TH | (WLAN_RTS_TX_TIME_TH << 8) |
(WLAN_MAX_AGG_PKT_LIMIT << 16) |
(WLAN_RTS_MAX_AGG_PKT_LIMIT << 24);
rtw_write32(rtwdev, REG_PROT_MODE_CTRL, value32);
rtw_write16(rtwdev, REG_BAR_MODE_CTRL + 2,
WLAN_BAR_RETRY_LIMIT | WLAN_RA_TRY_RATE_AGG_LIMIT << 8);
rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING, FAST_EDCA_VO_TH);
rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING + 2, FAST_EDCA_VI_TH);
rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING, FAST_EDCA_BE_TH);
rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING + 2, FAST_EDCA_BK_TH);
/* close BA parser */
rtw_write8_clr(rtwdev, REG_LIFETIME_EN, BIT_BA_PARSER_EN);
rtw_write32_clr(rtwdev, REG_RRSR, BITS_RRSR_RSC);
/* EDCA configuration */
rtw_write32(rtwdev, REG_EDCA_VO_PARAM, WLAN_EDCA_VO_PARAM);
rtw_write32(rtwdev, REG_EDCA_VI_PARAM, WLAN_EDCA_VI_PARAM);
rtw_write32(rtwdev, REG_EDCA_BE_PARAM, WLAN_EDCA_BE_PARAM);
rtw_write32(rtwdev, REG_EDCA_BK_PARAM, WLAN_EDCA_BK_PARAM);
rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_TIME);
rtw_write8_clr(rtwdev, REG_TX_PTCL_CTRL + 1, BIT_SIFS_BK_EN >> 8);
rtw_write8_set(rtwdev, REG_RD_CTRL + 1,
(BIT_DIS_TXOP_CFE | BIT_DIS_LSIG_CFE |
BIT_DIS_STBC_CFE) >> 8);
/* MAC clock configuration */
rtw_write32_clr(rtwdev, REG_AFE_CTRL1, BIT_MAC_CLK_SEL);
rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);
rtw_write8_set(rtwdev, REG_MISC_CTRL,
BIT_EN_FREE_CNT | BIT_DIS_SECOND_CCA);
rtw_write8_clr(rtwdev, REG_TIMER0_SRC_SEL, BIT_TSFT_SEL_TIMER0);
rtw_write16(rtwdev, REG_TXPAUSE, 0x0000);
rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
rtw_write32(rtwdev, REG_RD_NAV_NXT, WLAN_NAV_CFG);
rtw_write16(rtwdev, REG_RXTSF_OFFSET_CCK, WLAN_RX_TSF_CFG);
/* Set beacon control - enable TSF and other related functions */
rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
/* Set send beacon related registers */
rtw_write32(rtwdev, REG_TBTT_PROHIBIT, WLAN_TBTT_TIME);
rtw_write8(rtwdev, REG_DRVERLYINT, WLAN_DRV_EARLY_INT);
rtw_write8(rtwdev, REG_BCN_CTRL_CLINT0, WLAN_BCN_CTRL_CLT0);
rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
rtw_write8(rtwdev, REG_BCN_MAX_ERR, WLAN_BCN_MAX_ERR);
/* WMAC configuration */
rtw_write8(rtwdev, REG_BBPSF_CTRL + 2, WLAN_RESP_TXRATE);
rtw_write8(rtwdev, REG_ACKTO, WLAN_ACK_TO);
rtw_write8(rtwdev, REG_ACKTO_CCK, WLAN_ACK_TO_CCK);
rtw_write16(rtwdev, REG_EIFS, WLAN_EIFS_DUR_TUNE);
rtw_write8(rtwdev, REG_NAV_CTRL + 2, WLAN_NAV_MAX);
rtw_write8(rtwdev, REG_WMAC_TRXPTCL_CTL_H + 2, WLAN_BAR_ACK_TYPE);
rtw_write32(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RXPKT_MAX_SZ_512);
rtw_write8(rtwdev, REG_TCR + 2, WLAN_TX_FUNC_CFG2);
rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
rtw_write32_set(rtwdev, REG_GENERAL_OPTION, BIT_DUMMY_FCS_READY_MASK_EN);
rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION_1, WLAN_MAC_OPT_NORM_FUNC1);
/* init low power */
value16 = rtw_read16(rtwdev, REG_RXPSF_CTRL + 2) & 0xF00F;
value16 |= (BIT_RXGCK_VHT_FIFOTHR(1) | BIT_RXGCK_HT_FIFOTHR(1) |
BIT_RXGCK_OFDM_FIFOTHR(1) | BIT_RXGCK_CCK_FIFOTHR(1)) >> 16;
rtw_write16(rtwdev, REG_RXPSF_CTRL + 2, value16);
value16 = 0;
value16 = BIT_SET_RXPSF_PKTLENTHR(value16, 1);
value16 |= BIT_RXPSF_CTRLEN | BIT_RXPSF_VHTCHKEN | BIT_RXPSF_HTCHKEN
| BIT_RXPSF_OFDMCHKEN | BIT_RXPSF_CCKCHKEN
| BIT_RXPSF_OFDMRST;
rtw_write16(rtwdev, REG_RXPSF_CTRL, value16);
rtw_write32(rtwdev, REG_RXPSF_TYPE_CTRL, 0xFFFFFFFF);
/* rx ignore configuration */
value16 = rtw_read16(rtwdev, REG_RXPSF_CTRL);
value16 &= ~(BIT_RXPSF_MHCHKEN | BIT_RXPSF_CCKRST |
BIT_RXPSF_CONT_ERRCHKEN);
value16 = BIT_SET_RXPSF_ERRTHR(value16, 0x07);
rtw_write16(rtwdev, REG_RXPSF_CTRL, value16);
return 0;
}
static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
{
#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
#define RF18_BAND_2G (0)
#define RF18_BAND_5G (BIT(16) | BIT(8))
#define RF18_CHANNEL_MASK (MASKBYTE0)
#define RF18_RFSI_MASK (BIT(18) | BIT(17))
#define RF18_RFSI_GE_CH80 (BIT(17))
#define RF18_RFSI_GT_CH140 (BIT(18))
#define RF18_BW_MASK (BIT(13) | BIT(12))
#define RF18_BW_20M (BIT(13) | BIT(12))
#define RF18_BW_40M (BIT(13))
#define RF18_BW_80M (BIT(12))
u32 rf_reg18 = 0;
u32 rf_rxbb = 0;
rf_reg18 = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK);
rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
RF18_BW_MASK);
rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
rf_reg18 |= (channel & RF18_CHANNEL_MASK);
if (channel > 144)
rf_reg18 |= RF18_RFSI_GT_CH140;
else if (channel >= 80)
rf_reg18 |= RF18_RFSI_GE_CH80;
switch (bw) {
case RTW_CHANNEL_WIDTH_5:
case RTW_CHANNEL_WIDTH_10:
case RTW_CHANNEL_WIDTH_20:
default:
rf_reg18 |= RF18_BW_20M;
rf_rxbb = 0x18;
break;
case RTW_CHANNEL_WIDTH_40:
/* RF bandwidth */
rf_reg18 |= RF18_BW_40M;
rf_rxbb = 0x10;
break;
case RTW_CHANNEL_WIDTH_80:
rf_reg18 |= RF18_BW_80M;
rf_rxbb = 0x8;
break;
}
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x01);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, 0x1f, 0x12);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, 0xfffff, rf_rxbb);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x00);
rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE2, 0x04, 0x01);
rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWA, 0x1f, 0x12);
rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWD0, 0xfffff, rf_rxbb);
rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE2, 0x04, 0x00);
rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_reg18);
rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_reg18);
}
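/* Briefly lower and restore the initial gain index on both paths so that
 * the AGC re-settles after a channel or TRX path change.
 */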
static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
{
u32 igi;
igi = rtw_read32_mask(rtwdev, REG_RXIGI, 0x7f);
rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f, igi - 2);
rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f00, igi - 2);
rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f, igi);
rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f00, igi);
}
static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_ch_idx)
{
if (channel <= 14) {
rtw_write32_clr(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
rtw_write32_set(rtwdev, REG_TXF4, BIT(20));
rtw_write32_clr(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
rtw_write32_clr(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0xF);
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x0);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x0);
if (channel == 13 || channel == 14)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x969);
else if (channel == 11 || channel == 12)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x96a);
else
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x9aa);
if (channel == 14) {
rtw_write32_mask(rtwdev, REG_TXF0, MASKHWORD, 0x3da0);
rtw_write32_mask(rtwdev, REG_TXF1, MASKDWORD,
0x4962c931);
rtw_write32_mask(rtwdev, REG_TXF2, MASKLWORD, 0x6aa3);
rtw_write32_mask(rtwdev, REG_TXF3, MASKHWORD, 0xaa7b);
rtw_write32_mask(rtwdev, REG_TXF4, MASKLWORD, 0xf3d7);
rtw_write32_mask(rtwdev, REG_TXF5, MASKDWORD, 0x0);
rtw_write32_mask(rtwdev, REG_TXF6, MASKDWORD,
0xff012455);
rtw_write32_mask(rtwdev, REG_TXF7, MASKDWORD, 0xffff);
} else {
rtw_write32_mask(rtwdev, REG_TXF0, MASKHWORD, 0x5284);
rtw_write32_mask(rtwdev, REG_TXF1, MASKDWORD,
0x3e18fec8);
rtw_write32_mask(rtwdev, REG_TXF2, MASKLWORD, 0x0a88);
rtw_write32_mask(rtwdev, REG_TXF3, MASKHWORD, 0xacc4);
rtw_write32_mask(rtwdev, REG_TXF4, MASKLWORD, 0xc8b2);
rtw_write32_mask(rtwdev, REG_TXF5, MASKDWORD,
0x00faf0de);
rtw_write32_mask(rtwdev, REG_TXF6, MASKDWORD,
0x00122344);
rtw_write32_mask(rtwdev, REG_TXF7, MASKDWORD,
0x0fffffff);
}
if (channel == 13)
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
else
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x1);
} else if (channel > 35) {
rtw_write32_set(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
rtw_write32_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
rtw_write32_set(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
rtw_write32_clr(rtwdev, REG_TXF4, BIT(20));
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x0);
rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22);
rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
if (channel >= 36 && channel <= 64) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x1);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x1);
} else if (channel >= 100 && channel <= 144) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x2);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x2);
} else if (channel >= 149) {
rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x3);
rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x3);
}
if (channel >= 36 && channel <= 51)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x494);
else if (channel >= 52 && channel <= 55)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x493);
else if (channel >= 56 && channel <= 111)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x453);
else if (channel >= 112 && channel <= 119)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x452);
else if (channel >= 120 && channel <= 172)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x412);
else if (channel >= 173 && channel <= 177)
rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x411);
}
switch (bw) {
case RTW_CHANNEL_WIDTH_20:
rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x19B);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x7);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x6);
break;
case RTW_CHANNEL_WIDTH_40:
rtw_write32_mask(rtwdev, REG_CCKSB, BIT(4),
(primary_ch_idx == 1 ? 1 : 0));
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x5);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
(primary_ch_idx | (primary_ch_idx << 4)));
break;
case RTW_CHANNEL_WIDTH_80:
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0xa);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
(primary_ch_idx | (primary_ch_idx << 4)));
break;
case RTW_CHANNEL_WIDTH_5:
rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x2AB);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x1);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x4);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x4);
break;
case RTW_CHANNEL_WIDTH_10:
rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x2AB);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x2);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x6);
rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x5);
break;
}
}
static void rtw8822c_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
u8 primary_chan_idx)
{
rtw8822c_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
rtw8822c_set_channel_rf(rtwdev, channel, bw);
rtw8822c_toggle_igi(rtwdev);
}
static void rtw8822c_config_cck_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
{
if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
rtw_write32_mask(rtwdev, REG_CCANRX, 0x00060000, 0x0);
rtw_write32_mask(rtwdev, REG_CCANRX, 0x00600000, 0x0);
} else if (rx_path == BB_PATH_AB) {
rtw_write32_mask(rtwdev, REG_CCANRX, 0x00600000, 0x1);
rtw_write32_mask(rtwdev, REG_CCANRX, 0x00060000, 0x1);
}
if (rx_path == BB_PATH_A)
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x0);
else if (rx_path == BB_PATH_B)
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x5);
else if (rx_path == BB_PATH_AB)
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x1);
}
static void rtw8822c_config_ofdm_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
{
if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x300, 0x0);
rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x600000, 0x0);
rtw_write32_mask(rtwdev, REG_AGCSWSH, BIT(17), 0x0);
rtw_write32_mask(rtwdev, REG_ANTWTPD, BIT(20), 0x0);
rtw_write32_mask(rtwdev, REG_MRCM, BIT(24), 0x0);
} else if (rx_path == BB_PATH_AB) {
rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x300, 0x1);
rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x600000, 0x1);
rtw_write32_mask(rtwdev, REG_AGCSWSH, BIT(17), 0x1);
rtw_write32_mask(rtwdev, REG_ANTWTPD, BIT(20), 0x1);
rtw_write32_mask(rtwdev, REG_MRCM, BIT(24), 0x1);
}
rtw_write32_mask(rtwdev, 0x824, 0x0f000000, rx_path);
rtw_write32_mask(rtwdev, 0x824, 0x000f0000, rx_path);
}
static void rtw8822c_config_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
{
rtw8822c_config_cck_rx_path(rtwdev, rx_path);
rtw8822c_config_ofdm_rx_path(rtwdev, rx_path);
}
static void rtw8822c_config_cck_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
bool is_tx2_path)
{
if (tx_path == BB_PATH_A) {
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
} else if (tx_path == BB_PATH_B) {
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x4);
} else {
if (is_tx2_path)
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0xc);
else
rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
}
}
static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
bool is_tx2_path)
{
if (tx_path == BB_PATH_A) {
rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x11);
rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
} else if (tx_path == BB_PATH_B) {
rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x12);
rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
} else {
if (is_tx2_path) {
rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x33);
rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0404);
} else {
rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x31);
rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
}
}
}
static void rtw8822c_config_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
bool is_tx2_path)
{
rtw8822c_config_cck_tx_path(rtwdev, tx_path, is_tx2_path);
rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, is_tx2_path);
}
static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path)
{
if ((tx_path | rx_path) & BB_PATH_A)
rtw_write32_mask(rtwdev, REG_ORITXCODE, MASK20BITS, 0x33312);
else
rtw_write32_mask(rtwdev, REG_ORITXCODE, MASK20BITS, 0x11111);
if ((tx_path | rx_path) & BB_PATH_B)
rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x33312);
else
rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x11111);
rtw8822c_config_rx_path(rtwdev, rx_path);
rtw8822c_config_tx_path(rtwdev, tx_path, is_tx2_path);
rtw8822c_toggle_igi(rtwdev);
}
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 l_bnd, u_bnd;
u8 gain_a, gain_b;
s8 rx_power[RTW_RF_PATH_MAX];
s8 min_rx_power = -120;
rx_power[RF_PATH_A] = GET_PHY_STAT_P0_PWDB_A(phy_status);
rx_power[RF_PATH_B] = GET_PHY_STAT_P0_PWDB_B(phy_status);
l_bnd = dm_info->cck_gi_l_bnd;
u_bnd = dm_info->cck_gi_u_bnd;
gain_a = GET_PHY_STAT_P0_GAIN_A(phy_status);
gain_b = GET_PHY_STAT_P0_GAIN_B(phy_status);
if (gain_a < l_bnd)
rx_power[RF_PATH_A] += (l_bnd - gain_a) << 1;
else if (gain_a > u_bnd)
rx_power[RF_PATH_A] -= (gain_a - u_bnd) << 1;
if (gain_b < l_bnd)
rx_power[RF_PATH_B] += (l_bnd - gain_b) << 1;
else if (gain_b > u_bnd)
rx_power[RF_PATH_B] -= (gain_b - u_bnd) << 1;
rx_power[RF_PATH_A] -= 110;
rx_power[RF_PATH_B] -= 110;
pkt_stat->rx_power[RF_PATH_A] = max3(rx_power[RF_PATH_A],
rx_power[RF_PATH_B],
min_rx_power);
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
min_rx_power);
}
static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
u8 rxsc, bw;
s8 min_rx_power = -120;
if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
else
rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
if (rxsc >= 9 && rxsc <= 12)
bw = RTW_CHANNEL_WIDTH_40;
else if (rxsc >= 13)
bw = RTW_CHANNEL_WIDTH_80;
else
bw = RTW_CHANNEL_WIDTH_20;
pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
pkt_stat->rx_power[RF_PATH_B] = GET_PHY_STAT_P1_PWDB_B(phy_status) - 110;
pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 2);
pkt_stat->bw = bw;
pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
pkt_stat->rx_power[RF_PATH_B],
min_rx_power);
}
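/* The low nibble of the first phy status byte selects the report format:
 * page 0 is used for CCK frames (a single power report plus per-path AGC
 * gain, corrected by the CCK gain bounds cached at PHY init), page 1 for
 * OFDM/HT/VHT frames (per-path power plus the RX subchannel, from which
 * the bandwidth is derived).
 */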
static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
u8 page;
page = *phy_status & 0xf;
switch (page) {
case 0:
query_phy_status_page0(rtwdev, phy_status, pkt_stat);
break;
case 1:
query_phy_status_page1(rtwdev, phy_status, pkt_stat);
break;
default:
rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
return;
}
}
static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_hdr *hdr;
u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
u8 *phy_status = NULL;
memset(pkt_stat, 0, sizeof(*pkt_stat));
pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
pkt_stat->ppdu_cnt = GET_RX_DESC_PPDU_CNT(rx_desc);
pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
	/* drv_info_sz is in units of 8 bytes */
pkt_stat->drv_info_sz *= 8;
	/* we are not interested in the rx/phy status of c2h command packets */
if (pkt_stat->is_c2h)
return;
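	/* the 802.11 header follows the rx descriptor, the shift padding and
	 * the driver info block
	 */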
hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
pkt_stat->drv_info_sz);
if (pkt_stat->phy_status) {
phy_status = rx_desc + desc_sz + pkt_stat->shift;
query_phy_status(rtwdev, phy_status, pkt_stat);
}
rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
}
static void
rtw8822c_set_write_tx_power_ref(struct rtw_dev *rtwdev, u8 *tx_pwr_ref_cck,
u8 *tx_pwr_ref_ofdm)
{
struct rtw_hal *hal = &rtwdev->hal;
u32 txref_cck[2] = {0x18a0, 0x41a0};
u32 txref_ofdm[2] = {0x18e8, 0x41e8};
u8 path;
for (path = 0; path < hal->rf_path_num; path++) {
rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0);
rtw_write32_mask(rtwdev, txref_cck[path], 0x7f0000,
tx_pwr_ref_cck[path]);
}
for (path = 0; path < hal->rf_path_num; path++) {
rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0);
rtw_write32_mask(rtwdev, txref_ofdm[path], 0x1fc00,
tx_pwr_ref_ofdm[path]);
}
}
static void rtw8822c_set_tx_power_diff(struct rtw_dev *rtwdev, u8 rate,
s8 *diff_idx)
{
u32 offset_txagc = 0x3a00;
u8 rate_idx = rate & 0xfc;
u8 pwr_idx[4];
u32 phy_pwr_idx;
int i;
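	/* each 32-bit register at offset_txagc holds the 7-bit power offsets
	 * of four consecutive rates
	 */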
for (i = 0; i < 4; i++)
pwr_idx[i] = diff_idx[i] & 0x7f;
phy_pwr_idx = pwr_idx[0] |
(pwr_idx[1] << 8) |
(pwr_idx[2] << 16) |
(pwr_idx[3] << 24);
rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0x0);
rtw_write32_mask(rtwdev, offset_txagc + rate_idx, MASKDWORD,
phy_pwr_idx);
}
static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
u8 rs, rate, j;
u8 pwr_ref_cck[2] = {hal->tx_pwr_tbl[RF_PATH_A][DESC_RATE11M],
hal->tx_pwr_tbl[RF_PATH_B][DESC_RATE11M]};
u8 pwr_ref_ofdm[2] = {hal->tx_pwr_tbl[RF_PATH_A][DESC_RATEMCS7],
hal->tx_pwr_tbl[RF_PATH_B][DESC_RATEMCS7]};
s8 diff_a, diff_b;
u8 pwr_a, pwr_b;
s8 diff_idx[4];
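	/* CCK rates are referenced to the 11M power and the other rates to
	 * the MCS7 power; per-rate offsets take the smaller difference of the
	 * two paths and are written in groups of four rates
	 */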
rtw8822c_set_write_tx_power_ref(rtwdev, pwr_ref_cck, pwr_ref_ofdm);
for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
for (j = 0; j < rtw_rate_size[rs]; j++) {
rate = rtw_rate_section[rs][j];
pwr_a = hal->tx_pwr_tbl[RF_PATH_A][rate];
pwr_b = hal->tx_pwr_tbl[RF_PATH_B][rate];
if (rs == 0) {
diff_a = (s8)pwr_a - (s8)pwr_ref_cck[0];
diff_b = (s8)pwr_b - (s8)pwr_ref_cck[1];
} else {
diff_a = (s8)pwr_a - (s8)pwr_ref_ofdm[0];
diff_b = (s8)pwr_b - (s8)pwr_ref_ofdm[1];
}
diff_idx[rate % 4] = min(diff_a, diff_b);
if (rate % 4 == 3)
rtw8822c_set_tx_power_diff(rtwdev, rate - 3,
diff_idx);
}
}
}
static void rtw8822c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
{
u8 ldo_pwr;
ldo_pwr = rtw_read8(rtwdev, REG_ANAPARLDO_POW_MAC);
ldo_pwr = enable ? ldo_pwr | BIT_LDOE25_PON : ldo_pwr & ~BIT_LDOE25_PON;
rtw_write8(rtwdev, REG_ANAPARLDO_POW_MAC, ldo_pwr);
}
static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 cck_enable;
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 ofdm_tx_counter;
cck_enable = rtw_read32(rtwdev, REG_ENCCK) & BIT_CCK_BLK_EN;
cck_fa_cnt = rtw_read16(rtwdev, REG_CCK_FACNT);
ofdm_fa_cnt = rtw_read16(rtwdev, REG_OFDM_FACNT);
ofdm_tx_counter = rtw_read16(rtwdev, REG_OFDM_TXCNT);
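	/* exclude transmitted OFDM frames from the OFDM false alarm count */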
ofdm_fa_cnt -= ofdm_tx_counter;
dm_info->cck_fa_cnt = cck_fa_cnt;
dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
dm_info->total_fa_cnt = ofdm_fa_cnt;
dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
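	/* toggle the reset bits to clear the CCK/OFDM false alarm counters
	 * and then all counters
	 */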
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 2);
rtw_write32_set(rtwdev, REG_CNT_CTRL, BIT_ALL_CNT_RST);
rtw_write32_clr(rtwdev, REG_CNT_CTRL, BIT_ALL_CNT_RST);
}
static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
{
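	/* IQ calibration is not implemented for 8822c */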
}
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
{0x002E,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
{0x002D,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x007F,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(7), 0},
{0x004A,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3) | BIT(4) | BIT(7), 0},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
0,
RTW_PWR_CMD_END, 0, 0},
};
static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
{0x0000,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
{0x0075,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0006,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
{0x0075,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0xFF1A,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0x002E,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3), 0},
{0x0006,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(7), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_POLLING, BIT(0), 0},
{0x0074,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
{0x0071,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(4), 0},
{0x0062,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)),
(BIT(7) | BIT(6) | BIT(5))},
{0x0061,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)), 0},
{0x001F,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6)), BIT(7)},
{0x00EF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6)), BIT(7)},
{0x1045,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
{0x0010,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
0,
RTW_PWR_CMD_END, 0, 0},
};
static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
{0x0093,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3), 0},
{0x001F,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0x00EF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0},
{0x1045,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(4), 0},
{0xFF1A,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0x30},
{0x0049,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), 0},
{0x0006,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0x0002,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_POLLING, BIT(1), 0},
{0x0000,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
0,
RTW_PWR_CMD_END, 0, 0},
};
static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
{0x0007,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, 0xFF, 0x00},
{0x0067,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(5), 0},
{0x004A,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(0), 0},
{0x0081,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(7) | BIT(6), 0},
{0x0090,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(1), 0},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
{0x0005,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_PCI_MSK,
RTW_PWR_ADDR_MAC,
RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
{0x0086,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_SDIO_MSK,
RTW_PWR_ADDR_SDIO,
RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
{0xFFFF,
RTW_PWR_CUT_ALL_MSK,
RTW_PWR_INTF_ALL_MSK,
0,
RTW_PWR_CMD_END, 0, 0},
};
static struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
trans_carddis_to_cardemu_8822c,
trans_cardemu_to_act_8822c,
NULL
};
static struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
trans_act_to_cardemu_8822c,
trans_cardemu_to_carddis_8822c,
NULL
};
static struct rtw_intf_phy_para usb2_param_8822c[] = {
{0xFFFF, 0x00,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
static struct rtw_intf_phy_para usb3_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
static struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
static struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
{0xFFFF, 0x0000,
RTW_IP_SEL_PHY,
RTW_INTF_PHY_CUT_ALL,
RTW_INTF_PHY_PLATFORM_ALL},
};
static struct rtw_intf_phy_para_table phy_para_table_8822c = {
.usb2_para = usb2_param_8822c,
.usb3_para = usb3_param_8822c,
.gen1_para = pcie_gen1_param_8822c,
.gen2_para = pcie_gen2_param_8822c,
.n_usb2_para = ARRAY_SIZE(usb2_param_8822c),
	.n_usb3_para = ARRAY_SIZE(usb3_param_8822c),
.n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8822c),
.n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8822c),
};
static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8822c, 0, 0),
[1] = RTW_DEF_RFE(8822c, 0, 0),
[2] = RTW_DEF_RFE(8822c, 0, 0),
};
static struct rtw_hw_reg rtw8822c_dig[] = {
[0] = { .addr = 0x1d70, .mask = 0x7f },
[1] = { .addr = 0x1d70, .mask = 0x7f00 },
};
static struct rtw_page_table page_table_8822c[] = {
{64, 64, 64, 64, 1},
{64, 64, 64, 64, 1},
{64, 64, 0, 0, 1},
{64, 64, 64, 0, 1},
{64, 64, 64, 64, 1},
};
static struct rtw_rqpn rqpn_table_8822c[] = {
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
};
static struct rtw_chip_ops rtw8822c_ops = {
.phy_set_param = rtw8822c_phy_set_param,
.read_efuse = rtw8822c_read_efuse,
.query_rx_desc = rtw8822c_query_rx_desc,
.set_channel = rtw8822c_set_channel,
.mac_init = rtw8822c_mac_init,
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_mix,
.set_tx_power_index = rtw8822c_set_tx_power_index,
.cfg_ldo25 = rtw8822c_cfg_ldo25,
.false_alarm_statistics = rtw8822c_false_alarm_statistics,
.do_iqk = rtw8822c_do_iqk,
};
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
.fw_name = "rtw88/rtw8822c_fw.bin",
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
.rx_buf_desc_sz = 8,
.phy_efuse_size = 512,
.log_efuse_size = 768,
.ptct_efuse_size = 124,
.txff_size = 262144,
.rxff_size = 24576,
.txgi_factor = 2,
.is_pwr_by_rate_dec = false,
.max_power_index = 0x7f,
.csi_buf_pg_num = 50,
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = 128,
.dig_min = 0x20,
.ht_supported = true,
.vht_supported = true,
.sys_func_en = 0xD8,
.pwr_on_seq = card_enable_flow_8822c,
.pwr_off_seq = card_disable_flow_8822c,
.page_table = page_table_8822c,
.rqpn_table = rqpn_table_8822c,
.intf_table = &phy_para_table_8822c,
.dig = rtw8822c_dig,
.rf_base_addr = {0x3c00, 0x4c00},
.rf_sipi_addr = {0x1808, 0x4108},
.mac_tbl = &rtw8822c_mac_tbl,
.agc_tbl = &rtw8822c_agc_tbl,
.bb_tbl = &rtw8822c_bb_tbl,
.rfk_init_tbl = &rtw8822c_array_mp_cal_init_tbl,
.rf_tbl = {&rtw8822c_rf_a_tbl, &rtw8822c_rf_b_tbl},
.rfe_defs = rtw8822c_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
};
EXPORT_SYMBOL(rtw8822c_hw_spec);
MODULE_FIRMWARE("rtw88/rtw8822c_fw.bin");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW8822C_H__
#define __RTW8822C_H__
#include <asm/byteorder.h>
struct rtw8822cu_efuse {
u8 res0[0x30]; /* 0x120 */
u8 vid[2]; /* 0x150 */
u8 pid[2];
u8 res1[3];
u8 mac_addr[ETH_ALEN]; /* 0x157 */
u8 res2[0x3d];
};
struct rtw8822ce_efuse {
u8 mac_addr[ETH_ALEN]; /* 0x120 */
u8 vender_id[2];
u8 device_id[2];
u8 sub_vender_id[2];
u8 sub_device_id[2];
u8 pmc[2];
u8 exp_device_cap[2];
u8 msi_cap;
u8 ltr_cap; /* 0x133 */
u8 exp_link_control[2];
u8 link_cap[4];
u8 link_control[2];
u8 serial_number[8];
u8 res0:2; /* 0x144 */
u8 ltr_en:1;
u8 res1:2;
u8 obff:2;
u8 res2:3;
u8 obff_cap:2;
u8 res3:4;
u8 class_code[3];
u8 res4;
u8 pci_pm_L1_2_supp:1;
u8 pci_pm_L1_1_supp:1;
u8 aspm_pm_L1_2_supp:1;
u8 aspm_pm_L1_1_supp:1;
u8 L1_pm_substates_supp:1;
u8 res5:3;
u8 port_common_mode_restore_time;
u8 port_t_power_on_scale:2;
u8 res6:1;
u8 port_t_power_on_value:5;
u8 res7;
};
struct rtw8822c_efuse {
__le16 rtl_id;
u8 res0[0x0e];
/* power index for four RF paths */
struct rtw_txpwr_idx txpwr_idx_table[4];
u8 channel_plan; /* 0xb8 */
u8 xtal_k;
u8 res1;
u8 iqk_lck;
u8 res2[5]; /* 0xbc */
u8 rf_board_option;
u8 rf_feature_option;
u8 rf_bt_setting;
u8 eeprom_version;
u8 eeprom_customer_id;
u8 tx_bb_swing_setting_2g;
u8 tx_bb_swing_setting_5g;
u8 tx_pwr_calibrate_rate;
u8 rf_antenna_option; /* 0xc9 */
u8 rfe_option;
u8 country_code[2];
u8 res3[3];
u8 path_a_thermal; /* 0xd0 */
u8 path_b_thermal;
u8 res4[2];
u8 rx_gain_gap_2g_ofdm;
u8 res5;
u8 rx_gain_gap_2g_cck;
u8 res6;
u8 rx_gain_gap_5gl;
u8 res7;
u8 rx_gain_gap_5gm;
u8 res8;
u8 rx_gain_gap_5gh;
u8 res9;
u8 res10[0x42];
union {
struct rtw8822cu_efuse u;
struct rtw8822ce_efuse e;
};
};
#define DACK_PATH_8822C 2
#define DACK_REG_8822C 16
#define DACK_RF_8822C 1
#define DACK_SN_8822C 100
/* phy status page0 */
#define GET_PHY_STAT_P0_PWDB_A(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
#define GET_PHY_STAT_P0_PWDB_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
#define GET_PHY_STAT_P0_GAIN_A(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(21, 16))
#define GET_PHY_STAT_P0_GAIN_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(29, 24))
/* phy status page1 */
#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
#define REG_RRSR 0x0440
#define BITS_RRSR_RSC (BIT(21) | BIT(22))
#define REG_TXDFIR0 0x808
#define REG_DFIRBW 0x810
#define REG_ANTMAP0 0x820
#define REG_ANTMAP 0x824
#define REG_DYMPRITH 0x86c
#define REG_DYMENTH0 0x870
#define REG_DYMENTH 0x874
#define REG_DYMTHMIN 0x8a4
#define REG_TXBWCTL 0x9b0
#define REG_TXCLK 0x9b4
#define REG_SCOTRK 0xc30
#define REG_MRCM 0xc38
#define REG_AGCSWSH 0xc44
#define REG_ANTWTPD 0xc54
#define REG_ORITXCODE 0x1800
#define REG_3WIRE 0x180c
#define BIT_3WIRE_TX_EN BIT(0)
#define BIT_3WIRE_RX_EN BIT(1)
#define BIT_3WIRE_PI_ON BIT(28)
#define REG_RXAGCCTL0 0x18ac
#define REG_CCKSB 0x1a00
#define REG_RXCCKSEL 0x1a04
#define REG_BGCTRL 0x1a14
#define BITS_RX_IQ_WEIGHT (BIT(8) | BIT(9))
#define REG_TXF0 0x1a20
#define REG_TXF1 0x1a24
#define REG_TXF2 0x1a28
#define REG_CCANRX 0x1a2c
#define BIT_CCK_FA_RST (BIT(14) | BIT(15))
#define BIT_OFDM_FA_RST (BIT(12) | BIT(13))
#define REG_CCK_FACNT 0x1a5c
#define REG_CCKTXONLY 0x1a80
#define BIT_BB_CCK_CHECK_EN BIT(18)
#define REG_TXF3 0x1a98
#define REG_TXF4 0x1a9c
#define REG_TXF5 0x1aa0
#define REG_TXF6 0x1aac
#define REG_TXF7 0x1ab0
#define REG_TXANT 0x1c28
#define REG_ENCCK 0x1c3c
#define BIT_CCK_BLK_EN BIT(1)
#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
#define REG_CCAMSK 0x1c80
#define REG_RXFNCTL 0x1d30
#define REG_RXIGI 0x1d70
#define REG_ENFN 0x1e24
#define REG_TXANTSEG 0x1e28
#define REG_TXLGMAP 0x1e2c
#define REG_CCKPATH 0x1e5c
#define REG_CNT_CTRL 0x1eb4
#define BIT_ALL_CNT_RST BIT(25)
#define REG_OFDM_FACNT 0x2d00
#define REG_OFDM_TXCNT 0x2de0
#define REG_ORITXCODE2 0x4100
#define REG_3WIRE2 0x410c
#define REG_RXAGCCTL 0x41ac
#endif
(source diff omitted: file too large to display)
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW8822C_TABLE_H__
#define __RTW8822C_TABLE_H__
extern const struct rtw_table rtw8822c_mac_tbl;
extern const struct rtw_table rtw8822c_agc_tbl;
extern const struct rtw_table rtw8822c_bb_tbl;
extern const struct rtw_table rtw8822c_bb_pg_type0_tbl;
extern const struct rtw_table rtw8822c_rf_a_tbl;
extern const struct rtw_table rtw8822c_rf_b_tbl;
extern const struct rtw_table rtw8822c_txpwr_lmt_type0_tbl;
extern const struct rtw_table rtw8822c_array_mp_cal_init_tbl;
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "rx.h"
#include "ps.h"
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct rtw_vif *rtwvif;
hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_data(hdr->frame_control))
return;
if (!is_broadcast_ether_addr(hdr->addr1) &&
!is_multicast_ether_addr(hdr->addr1)) {
rtwdev->stats.rx_unicast += skb->len;
rtwdev->stats.rx_cnt++;
if (vif) {
rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->stats.rx_unicast += skb->len;
rtwvif->stats.rx_cnt++;
if (rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
rtw_leave_lps_irqsafe(rtwdev, rtwvif);
}
}
}
EXPORT_SYMBOL(rtw_rx_stats);
struct rtw_rx_addr_match_data {
struct rtw_dev *rtwdev;
struct ieee80211_hdr *hdr;
struct rtw_rx_pkt_stat *pkt_stat;
u8 *bssid;
};
static void rtw_rx_addr_match_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct rtw_rx_addr_match_data *iter_data = data;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr = iter_data->hdr;
struct rtw_dev *rtwdev = iter_data->rtwdev;
struct rtw_sta_info *si;
struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
u8 *bssid = iter_data->bssid;
if (ether_addr_equal(vif->bss_conf.bssid, bssid) &&
(ether_addr_equal(vif->addr, hdr->addr1) ||
ieee80211_is_beacon(hdr->frame_control)))
sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
vif->addr);
else
return;
if (!sta)
return;
si = (struct rtw_sta_info *)sta->drv_priv;
ewma_rssi_add(&si->avg_rssi, pkt_stat->rssi);
}
static void rtw_rx_addr_match(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_hdr *hdr)
{
struct rtw_rx_addr_match_data data = {};
if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status ||
ieee80211_is_ctl(hdr->frame_control))
return;
data.rtwdev = rtwdev;
data.hdr = hdr;
data.pkt_stat = pkt_stat;
data.bssid = get_hdr_bssid(hdr);
rtw_iterate_vifs_atomic(rtwdev, rtw_rx_addr_match_iter, &data);
}
void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rx_status,
u8 *phy_status)
{
struct ieee80211_hw *hw = rtwdev->hw;
memset(rx_status, 0, sizeof(*rx_status));
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
if (pkt_stat->crc_err)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (pkt_stat->decrypted)
rx_status->flag |= RX_FLAG_DECRYPTED;
if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0)
rx_status->encoding = RX_ENC_VHT;
else if (pkt_stat->rate >= DESC_RATEMCS0)
rx_status->encoding = RX_ENC_HT;
if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0 &&
pkt_stat->rate <= DESC_RATEVHT1SS_MCS9) {
rx_status->nss = 1;
rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT1SS_MCS0;
} else if (pkt_stat->rate >= DESC_RATEVHT2SS_MCS0 &&
pkt_stat->rate <= DESC_RATEVHT2SS_MCS9) {
rx_status->nss = 2;
rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT2SS_MCS0;
} else if (pkt_stat->rate >= DESC_RATEVHT3SS_MCS0 &&
pkt_stat->rate <= DESC_RATEVHT3SS_MCS9) {
rx_status->nss = 3;
rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT3SS_MCS0;
} else if (pkt_stat->rate >= DESC_RATEVHT4SS_MCS0 &&
pkt_stat->rate <= DESC_RATEVHT4SS_MCS9) {
rx_status->nss = 4;
rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT4SS_MCS0;
} else if (pkt_stat->rate >= DESC_RATEMCS0 &&
pkt_stat->rate <= DESC_RATEMCS15) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATEMCS0;
} else if (rx_status->band == NL80211_BAND_5GHZ &&
pkt_stat->rate >= DESC_RATE6M &&
pkt_stat->rate <= DESC_RATE54M) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATE6M;
} else if (rx_status->band == NL80211_BAND_2GHZ &&
pkt_stat->rate >= DESC_RATE1M &&
pkt_stat->rate <= DESC_RATE54M) {
rx_status->rate_idx = pkt_stat->rate - DESC_RATE1M;
} else {
rx_status->rate_idx = 0;
}
rx_status->flag |= RX_FLAG_MACTIME_START;
rx_status->mactime = pkt_stat->tsf_low;
if (pkt_stat->bw == RTW_CHANNEL_WIDTH_80)
rx_status->bw = RATE_INFO_BW_80;
else if (pkt_stat->bw == RTW_CHANNEL_WIDTH_40)
rx_status->bw = RATE_INFO_BW_40;
else
rx_status->bw = RATE_INFO_BW_20;
rx_status->signal = pkt_stat->signal_power;
rtw_rx_addr_match(rtwdev, pkt_stat, hdr);
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_RX_H_
#define __RTW_RX_H_
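/* rx descriptor field accessors; offsets below are in 32-bit words */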
#define GET_RX_DESC_PHYST(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(26))
#define GET_RX_DESC_ICV_ERR(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(15))
#define GET_RX_DESC_CRC32(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(14))
#define GET_RX_DESC_SWDEC(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(27))
#define GET_RX_DESC_C2H(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x02), BIT(28))
#define GET_RX_DESC_PKT_LEN(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(13, 0))
#define GET_RX_DESC_DRV_INFO_SIZE(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(19, 16))
#define GET_RX_DESC_SHIFT(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(25, 24))
#define GET_RX_DESC_RX_RATE(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x03), GENMASK(6, 0))
#define GET_RX_DESC_MACID(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x01), GENMASK(6, 0))
#define GET_RX_DESC_PPDU_CNT(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x02), GENMASK(30, 29))
#define GET_RX_DESC_TSFL(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0))
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb);
void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rx_status,
u8 *phy_status);
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "sec.h"
#include "reg.h"
int rtw_sec_get_free_cam(struct rtw_sec_desc *sec)
{
	/* if default key search is enabled, the first 4 cam entries are
	 * mapped directly to group keys by key->key_idx, so the driver
	 * should install pairwise keys in cam entries after the first 4
	 */
if (sec->default_key_search)
return find_next_zero_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM,
RTW_SEC_DEFAULT_KEY_NUM);
return find_first_zero_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM);
}
void rtw_sec_write_cam(struct rtw_dev *rtwdev,
struct rtw_sec_desc *sec,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key,
u8 hw_key_type, u8 hw_key_idx)
{
struct rtw_cam_entry *cam = &sec->cam_table[hw_key_idx];
u32 write_cmd;
u32 command;
u32 content;
u32 addr;
int i, j;
set_bit(hw_key_idx, sec->cam_map);
cam->valid = true;
cam->group = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
cam->hw_key_type = hw_key_type;
cam->key = key;
if (sta)
ether_addr_copy(cam->addr, sta->addr);
else
eth_broadcast_addr(cam->addr);
write_cmd = RTW_SEC_CMD_WRITE_ENABLE | RTW_SEC_CMD_POLLING;
addr = hw_key_idx << RTW_SEC_CAM_ENTRY_SHIFT;
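	/* a CAM entry spans 8 dwords; write 6 of them from the highest offset
	 * down: dword 0 carries the key index, type, group/valid flags and
	 * addr[0..1], dword 1 addr[2..5], and dwords 2-5 the key material
	 */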
for (i = 5; i >= 0; i--) {
switch (i) {
case 0:
content = ((key->keyidx & 0x3)) |
((hw_key_type & 0x7) << 2) |
(cam->group << 6) |
(cam->valid << 15) |
(cam->addr[0] << 16) |
(cam->addr[1] << 24);
break;
case 1:
content = (cam->addr[2]) |
(cam->addr[3] << 8) |
(cam->addr[4] << 16) |
(cam->addr[5] << 24);
break;
default:
j = (i - 2) << 2;
content = (key->key[j]) |
(key->key[j + 1] << 8) |
(key->key[j + 2] << 16) |
(key->key[j + 3] << 24);
break;
}
command = write_cmd | (addr + i);
rtw_write32(rtwdev, RTW_SEC_WRITE_REG, content);
rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
}
}
void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
struct rtw_sec_desc *sec,
u8 hw_key_idx)
{
struct rtw_cam_entry *cam = &sec->cam_table[hw_key_idx];
u32 write_cmd;
u32 command;
u32 addr;
clear_bit(hw_key_idx, sec->cam_map);
cam->valid = false;
cam->key = NULL;
eth_zero_addr(cam->addr);
write_cmd = RTW_SEC_CMD_WRITE_ENABLE | RTW_SEC_CMD_POLLING;
addr = hw_key_idx << RTW_SEC_CAM_ENTRY_SHIFT;
command = write_cmd | addr;
rtw_write32(rtwdev, RTW_SEC_WRITE_REG, 0);
rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
}
void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev)
{
struct rtw_sec_desc *sec = &rtwdev->sec;
u16 ctrl_reg;
u16 sec_config;
	/* use default key search by default for now */
sec->default_key_search = true;
ctrl_reg = rtw_read16(rtwdev, REG_CR);
ctrl_reg |= RTW_SEC_ENGINE_EN;
rtw_write16(rtwdev, REG_CR, ctrl_reg);
sec_config = rtw_read16(rtwdev, RTW_SEC_CONFIG);
sec_config |= RTW_SEC_TX_DEC_EN | RTW_SEC_RX_DEC_EN;
if (sec->default_key_search)
sec_config |= RTW_SEC_TX_UNI_USE_DK | RTW_SEC_RX_UNI_USE_DK |
RTW_SEC_TX_BC_USE_DK | RTW_SEC_RX_BC_USE_DK;
rtw_write16(rtwdev, RTW_SEC_CONFIG, sec_config);
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_SEC_H_
#define __RTW_SEC_H_
#define RTW_SEC_CMD_REG 0x670
#define RTW_SEC_WRITE_REG 0x674
#define RTW_SEC_READ_REG 0x678
#define RTW_SEC_CONFIG 0x680
#define RTW_SEC_CAM_ENTRY_SHIFT 3
#define RTW_SEC_DEFAULT_KEY_NUM 4
#define RTW_SEC_CMD_WRITE_ENABLE BIT(16)
#define RTW_SEC_CMD_CLEAR BIT(30)
#define RTW_SEC_CMD_POLLING BIT(31)
#define RTW_SEC_TX_UNI_USE_DK BIT(0)
#define RTW_SEC_RX_UNI_USE_DK BIT(1)
#define RTW_SEC_TX_DEC_EN BIT(2)
#define RTW_SEC_RX_DEC_EN BIT(3)
#define RTW_SEC_TX_BC_USE_DK BIT(6)
#define RTW_SEC_RX_BC_USE_DK BIT(7)
#define RTW_SEC_ENGINE_EN BIT(9)
int rtw_sec_get_free_cam(struct rtw_sec_desc *sec);
void rtw_sec_write_cam(struct rtw_dev *rtwdev,
struct rtw_sec_desc *sec,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key,
u8 hw_key_type, u8 hw_key_idx);
void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
struct rtw_sec_desc *sec,
u8 hw_key_idx);
void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev);
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "tx.h"
#include "fw.h"
#include "ps.h"
static
void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct rtw_vif *rtwvif;
hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_data(hdr->frame_control))
return;
if (!is_broadcast_ether_addr(hdr->addr1) &&
!is_multicast_ether_addr(hdr->addr1)) {
rtwdev->stats.tx_unicast += skb->len;
rtwdev->stats.tx_cnt++;
if (vif) {
rtwvif = (struct rtw_vif *)vif->drv_priv;
rtwvif->stats.tx_unicast += skb->len;
rtwvif->stats.tx_cnt++;
if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD)
rtw_leave_lps_irqsafe(rtwdev, rtwvif);
}
}
}
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
{
__le32 *txdesc = (__le32 *)skb->data;
SET_TX_DESC_TXPKTSIZE(txdesc, pkt_info->tx_pkt_size);
SET_TX_DESC_OFFSET(txdesc, pkt_info->offset);
SET_TX_DESC_PKT_OFFSET(txdesc, pkt_info->pkt_offset);
SET_TX_DESC_QSEL(txdesc, pkt_info->qsel);
SET_TX_DESC_BMC(txdesc, pkt_info->bmc);
SET_TX_DESC_RATE_ID(txdesc, pkt_info->rate_id);
SET_TX_DESC_DATARATE(txdesc, pkt_info->rate);
SET_TX_DESC_DISDATAFB(txdesc, pkt_info->dis_rate_fallback);
SET_TX_DESC_USE_RATE(txdesc, pkt_info->use_rate);
SET_TX_DESC_SEC_TYPE(txdesc, pkt_info->sec_type);
SET_TX_DESC_DATA_BW(txdesc, pkt_info->bw);
SET_TX_DESC_SW_SEQ(txdesc, pkt_info->seq);
SET_TX_DESC_MAX_AGG_NUM(txdesc, pkt_info->ampdu_factor);
SET_TX_DESC_AMPDU_DENSITY(txdesc, pkt_info->ampdu_density);
SET_TX_DESC_DATA_STBC(txdesc, pkt_info->stbc);
SET_TX_DESC_DATA_LDPC(txdesc, pkt_info->ldpc);
SET_TX_DESC_AGG_EN(txdesc, pkt_info->ampdu_en);
SET_TX_DESC_LS(txdesc, pkt_info->ls);
SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
{
u8 exp = sta->ht_cap.ampdu_factor;
	/* the smallest A-MPDU factor is 8K, and the value in the tx desc is
	 * the max aggregation number, where a value of val means val * 2
	 * packets can be aggregated in an A-MPDU, so use 8 / 2 = 4 as the base
	 */
return (BIT(2) << exp) - 1;
}
static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
{
return sta->ht_cap.ampdu_density;
}
static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
struct ieee80211_sta *sta)
{
u8 rate;
if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0)
rate = DESC_RATEMCS15;
else
rate = DESC_RATEMCS7;
return rate;
}
static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
struct ieee80211_sta *sta)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 rate;
u16 tx_mcs_map;
tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
if (efuse->hw_cap.nss == 1) {
switch (tx_mcs_map & 0x3) {
case IEEE80211_VHT_MCS_SUPPORT_0_7:
rate = DESC_RATEVHT1SS_MCS7;
break;
case IEEE80211_VHT_MCS_SUPPORT_0_8:
rate = DESC_RATEVHT1SS_MCS8;
break;
default:
case IEEE80211_VHT_MCS_SUPPORT_0_9:
rate = DESC_RATEVHT1SS_MCS9;
break;
}
} else if (efuse->hw_cap.nss >= 2) {
switch ((tx_mcs_map & 0xc) >> 2) {
case IEEE80211_VHT_MCS_SUPPORT_0_7:
rate = DESC_RATEVHT2SS_MCS7;
break;
case IEEE80211_VHT_MCS_SUPPORT_0_8:
rate = DESC_RATEVHT2SS_MCS8;
break;
default:
case IEEE80211_VHT_MCS_SUPPORT_0_9:
rate = DESC_RATEVHT2SS_MCS9;
break;
}
} else {
rate = DESC_RATEVHT1SS_MCS9;
}
return rate;
}
static void rtw_tx_report_enable(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info)
{
struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	/* [11:8], reserved, filled with zeros
	 * [7:2], tx report sequence number
	 * [1:0], firmware use, filled with zeros
	 */
pkt_info->sn = (atomic_inc_return(&tx_report->sn) << 2) & 0xfc;
pkt_info->report = true;
}
void rtw_tx_report_purge_timer(struct timer_list *t)
{
struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
struct rtw_tx_report *tx_report = &rtwdev->tx_report;
unsigned long flags;
if (skb_queue_len(&tx_report->queue) == 0)
return;
WARN(1, "purge skb(s) not reported by firmware\n");
spin_lock_irqsave(&tx_report->q_lock, flags);
skb_queue_purge(&tx_report->queue);
spin_unlock_irqrestore(&tx_report->q_lock, flags);
}
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn)
{
struct rtw_tx_report *tx_report = &rtwdev->tx_report;
unsigned long flags;
u8 *drv_data;
/* pass sn to tx report handler through driver data */
drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data;
*drv_data = sn;
spin_lock_irqsave(&tx_report->q_lock, flags);
__skb_queue_tail(&tx_report->queue, skb);
spin_unlock_irqrestore(&tx_report->q_lock, flags);
mod_timer(&tx_report->purge_timer, jiffies + RTW_TX_PROBE_TIMEOUT);
}
EXPORT_SYMBOL(rtw_tx_report_enqueue);
static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
struct sk_buff *skb, bool acked)
{
struct ieee80211_tx_info *info;
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
if (acked)
info->flags |= IEEE80211_TX_STAT_ACK;
else
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
}
void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_tx_report *tx_report = &rtwdev->tx_report;
struct rtw_c2h_cmd *c2h;
struct sk_buff *cur, *tmp;
unsigned long flags;
u8 sn, st;
u8 *n;
c2h = get_c2h_from_skb(skb);
sn = GET_CCX_REPORT_SEQNUM(c2h->payload);
st = GET_CCX_REPORT_STATUS(c2h->payload);
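	/* look up the queued skb whose driver data matches the reported
	 * sequence number and complete its tx status
	 */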
spin_lock_irqsave(&tx_report->q_lock, flags);
skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data;
if (*n == sn) {
__skb_unlink(cur, &tx_report->queue);
rtw_tx_report_tx_status(rtwdev, cur, st == 0);
break;
}
}
spin_unlock_irqrestore(&tx_report->q_lock, flags);
}
static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
pkt_info->use_rate = true;
pkt_info->rate_id = 6;
pkt_info->dis_rate_fallback = true;
}
static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ieee80211_sta *sta = control->sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct rtw_sta_info *si;
u16 seq;
u8 ampdu_factor = 0;
u8 ampdu_density = 0;
bool ampdu_en = false;
u8 rate = DESC_RATE6M;
u8 rate_id = 6;
u8 bw = RTW_CHANNEL_WIDTH_20;
bool stbc = false;
bool ldpc = false;
seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
/* for broadcast/multicast, use default values */
if (!sta)
goto out;
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
ampdu_en = true;
ampdu_factor = get_tx_ampdu_factor(sta);
ampdu_density = get_tx_ampdu_density(sta);
}
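	/* pick the highest rate the station supports; bandwidth, rate id,
	 * STBC and LDPC settings come from the station info
	 */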
if (sta->vht_cap.vht_supported)
rate = get_highest_vht_tx_rate(rtwdev, sta);
else if (sta->ht_cap.ht_supported)
rate = get_highest_ht_tx_rate(rtwdev, sta);
else if (sta->supp_rates[0] <= 0xf)
rate = DESC_RATE11M;
else
rate = DESC_RATE54M;
si = (struct rtw_sta_info *)sta->drv_priv;
bw = si->bw_mode;
rate_id = si->rate_id;
stbc = si->stbc_en;
ldpc = si->ldpc_en;
out:
pkt_info->seq = seq;
pkt_info->ampdu_factor = ampdu_factor;
pkt_info->ampdu_density = ampdu_density;
pkt_info->ampdu_en = ampdu_en;
pkt_info->rate = rate;
pkt_info->rate_id = rate_id;
pkt_info->bw = bw;
pkt_info->stbc = stbc;
pkt_info->ldpc = ldpc;
}
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtw_sta_info *si;
struct ieee80211_vif *vif = NULL;
__le16 fc = hdr->frame_control;
u8 sec_type = 0;
bool bmc;
if (control->sta) {
si = (struct rtw_sta_info *)control->sta->drv_priv;
vif = si->vif;
}
if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, control, skb);
else if (ieee80211_is_data(fc))
rtw_tx_data_pkt_info_update(rtwdev, pkt_info, control, skb);
if (info->control.hw_key) {
struct ieee80211_key_conf *key = info->control.hw_key;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
sec_type = 0x01;
break;
case WLAN_CIPHER_SUITE_CCMP:
sec_type = 0x03;
break;
default:
break;
}
}
bmc = is_broadcast_ether_addr(hdr->addr1) ||
is_multicast_ether_addr(hdr->addr1);
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
rtw_tx_report_enable(rtwdev, pkt_info);
pkt_info->bmc = bmc;
pkt_info->sec_type = sec_type;
pkt_info->tx_pkt_size = skb->len;
pkt_info->offset = chip->tx_pkt_desc_sz;
pkt_info->qsel = skb->priority;
pkt_info->ls = true;
/* maybe merge with tx status ? */
rtw_tx_stats(rtwdev, vif, skb);
}
void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool bmc;
bmc = is_broadcast_ether_addr(hdr->addr1) ||
is_multicast_ether_addr(hdr->addr1);
pkt_info->use_rate = true;
pkt_info->rate_id = 6;
pkt_info->dis_rate_fallback = true;
pkt_info->bmc = bmc;
pkt_info->tx_pkt_size = skb->len;
pkt_info->offset = chip->tx_pkt_desc_sz;
pkt_info->qsel = skb->priority;
pkt_info->ls = true;
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_TX_H_
#define __RTW_TX_H_
#define RTK_TX_MAX_AGG_NUM_MASK 0x1f
#define RTW_TX_PROBE_TIMEOUT msecs_to_jiffies(500)
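/* tx descriptor field setters; offsets below are in 32-bit words */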
#define SET_TX_DESC_TXPKTSIZE(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, GENMASK(15, 0))
#define SET_TX_DESC_OFFSET(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, GENMASK(23, 16))
#define SET_TX_DESC_PKT_OFFSET(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(28, 24))
#define SET_TX_DESC_QSEL(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(12, 8))
#define SET_TX_DESC_BMC(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, BIT(24))
#define SET_TX_DESC_RATE_ID(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(20, 16))
#define SET_TX_DESC_DATARATE(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x04, value, GENMASK(6, 0))
#define SET_TX_DESC_DISDATAFB(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(10))
#define SET_TX_DESC_USE_RATE(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(8))
#define SET_TX_DESC_SEC_TYPE(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(23, 22))
#define SET_TX_DESC_DATA_BW(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(6, 5))
#define SET_TX_DESC_SW_SEQ(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12))
#define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
#define SET_TX_DESC_AMPDU_DENSITY(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, GENMASK(22, 20))
#define SET_TX_DESC_DATA_STBC(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(9, 8))
#define SET_TX_DESC_DATA_LDPC(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, BIT(7))
#define SET_TX_DESC_AGG_EN(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(12))
#define SET_TX_DESC_LS(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, BIT(26))
#define SET_TX_DESC_DATA_SHORT(txdesc, value) \
le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, BIT(4))
#define SET_TX_DESC_SPE_RPT(txdesc, value) \
	le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(19))
#define SET_TX_DESC_SW_DEFINE(txdesc, value) \
	le32p_replace_bits((__le32 *)(txdesc) + 0x06, value, GENMASK(11, 0))
enum rtw_tx_desc_queue_select {
TX_DESC_QSEL_TID0 = 0,
TX_DESC_QSEL_TID1 = 1,
TX_DESC_QSEL_TID2 = 2,
TX_DESC_QSEL_TID3 = 3,
TX_DESC_QSEL_TID4 = 4,
TX_DESC_QSEL_TID5 = 5,
TX_DESC_QSEL_TID6 = 6,
TX_DESC_QSEL_TID7 = 7,
TX_DESC_QSEL_TID8 = 8,
TX_DESC_QSEL_TID9 = 9,
TX_DESC_QSEL_TID10 = 10,
TX_DESC_QSEL_TID11 = 11,
TX_DESC_QSEL_TID12 = 12,
TX_DESC_QSEL_TID13 = 13,
TX_DESC_QSEL_TID14 = 14,
TX_DESC_QSEL_TID15 = 15,
TX_DESC_QSEL_BEACON = 16,
TX_DESC_QSEL_HIGH = 17,
TX_DESC_QSEL_MGMT = 18,
TX_DESC_QSEL_H2C = 19,
};
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_tx_control *control,
struct sk_buff *skb);
void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb);
#endif
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#include "main.h"
#include "util.h"
#include "reg.h"
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
u32 cnt;
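	/* poll up to 1000 times with a 10us delay, roughly 10ms in total */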
for (cnt = 0; cnt < 1000; cnt++) {
if (rtw_read32_mask(rtwdev, addr, mask) == target)
return true;
udelay(10);
}
return false;
}
bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
{
if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1))
return false;
rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0x800F0000 | offset);
*val = rtw_read32(rtwdev, LTECOEX_READ_DATA);
return true;
}
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value)
{
if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1))
return false;
rtw_write32(rtwdev, LTECOEX_WRITE_DATA, value);
rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0xC00F0000 | offset);
return true;
}
void rtw_restore_reg(struct rtw_dev *rtwdev,
struct rtw_backup_info *bckp, u32 num)
{
u8 len;
u32 reg;
u32 val;
int i;
for (i = 0; i < num; i++, bckp++) {
len = bckp->len;
reg = bckp->reg;
val = bckp->val;
switch (len) {
case 1:
rtw_write8(rtwdev, reg, (u8)val);
break;
case 2:
rtw_write16(rtwdev, reg, (u16)val);
break;
case 4:
rtw_write32(rtwdev, reg, (u32)val);
break;
default:
break;
}
}
}
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019 Realtek Corporation
*/
#ifndef __RTW_UTIL_H__
#define __RTW_UTIL_H__
struct rtw_dev;
#define rtw_iterate_vifs(rtwdev, iterator, data) \
ieee80211_iterate_active_interfaces(rtwdev->hw, \
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
#define rtw_iterate_vifs_atomic(rtwdev, iterator, data) \
ieee80211_iterate_active_interfaces_atomic(rtwdev->hw, \
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
#define rtw_iterate_stas_atomic(rtwdev, iterator, data) \
ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data)
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
__le16 fc = hdr->frame_control;
u8 *bssid;
if (ieee80211_has_tods(fc))
bssid = hdr->addr1;
else if (ieee80211_has_fromds(fc))
bssid = hdr->addr2;
else
bssid = hdr->addr3;
return bssid;
}
#endif