Commit bf2320a6 authored by David S. Miller

Merge branch 'net-atlantic-A2-support'

Igor Russkikh says:

====================
net: atlantic: A2 support

This patchset adds support for the new generation of Atlantic NICs.

Chip generations are mostly compatible register-wise, but there are still
some differences. Therefore we've made some of the first-generation (A1) code
non-static to re-use it where possible.

Some pieces are A2 specific, in which case we redefine/extend such APIs.

v2:
 * removed #pragma pack (2 structures require the packed attribute);
 * use defines instead of magic numbers where possible;

v1: https://patchwork.ozlabs.org/cover/1276220/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e00edb4e 43c670c8
......@@ -25,6 +25,10 @@ atlantic-objs := aq_main.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_utils_fw2x.o \
hw_atl/hw_atl_llh.o \
hw_atl2/hw_atl2.o \
hw_atl2/hw_atl2_utils.o \
hw_atl2/hw_atl2_utils_fw.o \
hw_atl2/hw_atl2_llh.o \
macsec/macsec_api.o
atlantic-$(CONFIG_MACSEC) += aq_macsec.o
......
......@@ -80,8 +80,8 @@
#define AQ_CFG_LOCK_TRYS 100U
#define AQ_CFG_DRV_AUTHOR "aQuantia"
#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
#define AQ_CFG_DRV_AUTHOR "Marvell"
#define AQ_CFG_DRV_DESC "Marvell (Aquantia) Corporation(R) Network Driver"
#define AQ_CFG_DRV_NAME "atlantic"
#endif /* AQ_CFG_H */
......@@ -37,22 +37,31 @@
#define AQ_DEVICE_ID_AQC111S 0x91B1
#define AQ_DEVICE_ID_AQC112S 0x92B1
#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
#define AQ_DEVICE_ID_AQC113DEV 0x00C0
#define AQ_DEVICE_ID_AQC113CS 0x94C0
#define AQ_DEVICE_ID_AQC114CS 0x93C0
#define AQ_DEVICE_ID_AQC113 0x04C0
#define AQ_DEVICE_ID_AQC113C 0x14C0
#define AQ_DEVICE_ID_AQC115C 0x12C0
#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
#define AQ_HWREV_ANY 0
#define AQ_HWREV_1 1
#define AQ_HWREV_2 2
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
#define AQ_NIC_RATE_5GSR BIT(2)
#define AQ_NIC_RATE_2GS BIT(3)
#define AQ_NIC_RATE_1G BIT(4)
#define AQ_NIC_RATE_100M BIT(5)
#define AQ_NIC_RATE_EEE_10G BIT(6)
#define AQ_NIC_RATE_EEE_5G BIT(7)
#define AQ_NIC_RATE_EEE_2GS BIT(8)
#define AQ_NIC_RATE_EEE_1G BIT(9)
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
#define AQ_NIC_RATE_5GSR BIT(2)
#define AQ_NIC_RATE_2GS BIT(3)
#define AQ_NIC_RATE_1G BIT(4)
#define AQ_NIC_RATE_100M BIT(5)
#define AQ_NIC_RATE_10M BIT(6)
#define AQ_NIC_RATE_EEE_10G BIT(7)
#define AQ_NIC_RATE_EEE_5G BIT(8)
#define AQ_NIC_RATE_EEE_2GS BIT(9)
#define AQ_NIC_RATE_EEE_1G BIT(10)
#define AQ_NIC_RATE_EEE_100M BIT(11)
#endif /* AQ_COMMON_H */
......@@ -611,6 +611,9 @@ static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
if (speed & AQ_NIC_RATE_EEE_1G)
rate |= SUPPORTED_1000baseT_Full;
if (speed & AQ_NIC_RATE_EEE_100M)
rate |= SUPPORTED_100baseT_Full;
return rate;
}
......
......@@ -55,6 +55,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
u32 priv_data_len;
};
struct aq_hw_link_status_s {
......@@ -136,6 +137,19 @@ enum aq_priv_flags {
BIT(AQ_HW_LOOPBACK_PHYINT_SYS) |\
BIT(AQ_HW_LOOPBACK_PHYEXT_SYS))
#define ATL_HW_CHIP_MIPS 0x00000001U
#define ATL_HW_CHIP_TPO2 0x00000002U
#define ATL_HW_CHIP_RPF2 0x00000004U
#define ATL_HW_CHIP_MPI_AQ 0x00000010U
#define ATL_HW_CHIP_ATLANTIC 0x00800000U
#define ATL_HW_CHIP_REVISION_A0 0x01000000U
#define ATL_HW_CHIP_REVISION_B0 0x02000000U
#define ATL_HW_CHIP_REVISION_B1 0x04000000U
#define ATL_HW_CHIP_ANTIGUA 0x08000000U
#define ATL_HW_IS_CHIP_FEATURE(_HW_, _F_) (!!(ATL_HW_CHIP_##_F_ & \
(_HW_)->chip_features))
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
......@@ -159,6 +173,7 @@ struct aq_hw_s {
struct hw_atl_utils_fw_rpc rpc;
s64 ptp_clk_offset;
u16 phy_id;
void *priv;
};
struct aq_ring_s;
......@@ -182,6 +197,11 @@ struct aq_hw_ops {
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
int (*hw_soft_reset)(struct aq_hw_s *self);
int (*hw_prepare)(struct aq_hw_s *self,
const struct aq_fw_ops **fw_ops);
int (*hw_reset)(struct aq_hw_s *self);
int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
......@@ -254,7 +274,7 @@ struct aq_hw_ops {
struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
u32 (*hw_get_fw_version)(struct aq_hw_s *self);
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
......
......@@ -257,6 +257,20 @@ static void aq_nic_polling_timer_cb(struct timer_list *t)
AQ_CFG_POLLING_TIMER_INTERVAL);
}
static int aq_nic_hw_prepare(struct aq_nic_s *self)
{
int err = 0;
err = self->aq_hw_ops->hw_soft_reset(self->aq_hw);
if (err)
goto exit;
err = self->aq_hw_ops->hw_prepare(self->aq_hw, &self->aq_fw_ops);
exit:
return err;
}
int aq_nic_ndev_register(struct aq_nic_s *self)
{
int err = 0;
......@@ -266,7 +280,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
goto err_exit;
}
err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
err = aq_nic_hw_prepare(self);
if (err)
goto err_exit;
......@@ -364,7 +378,8 @@ int aq_nic_init(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ATLANTIC) &&
self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);
}
......@@ -764,6 +779,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
u32 *regs_buff = p;
int err = 0;
if (unlikely(!self->aq_hw_ops->hw_get_regs))
return -EOPNOTSUPP;
regs->version = 1;
err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
......@@ -778,6 +796,9 @@ int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
if (unlikely(!self->aq_hw_ops->hw_get_regs))
return 0;
return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
......@@ -885,6 +906,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, supported,
100baseT_Full);
if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
ethtool_link_ksettings_add_link_mode(cmd, supported,
10baseT_Full);
if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
ethtool_link_ksettings_add_link_mode(cmd, supported,
Pause);
......@@ -924,6 +949,10 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
ethtool_link_ksettings_add_link_mode(cmd, advertising,
100baseT_Full);
if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
10baseT_Full);
if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
......@@ -954,6 +983,10 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
speed = cmd->base.speed;
switch (speed) {
case SPEED_10:
rate = AQ_NIC_RATE_10M;
break;
case SPEED_100:
rate = AQ_NIC_RATE_100M;
break;
......@@ -1006,11 +1039,7 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
u32 fw_version = 0U;
self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);
return fw_version;
return self->aq_hw_ops->hw_get_fw_version(self->aq_hw);
}
int aq_nic_set_loopback(struct aq_nic_s *self)
......
......@@ -16,6 +16,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl2/hw_atl2.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"
......@@ -41,6 +42,13 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
{}
};
......@@ -70,6 +78,13 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
{ AQ_DEVICE_ID_AQC113DEV, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC113, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
......@@ -104,10 +119,8 @@ int aq_pci_func_init(struct pci_dev *pdev)
int err;
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
......@@ -237,6 +250,15 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_ioremap;
}
self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;
self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
if (!self->aq_hw->priv) {
err = -ENOMEM;
goto err_free_aq_hw;
}
}
for (bar = 0; bar < 4; ++bar) {
if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
......@@ -245,19 +267,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
mmio_pa = pci_resource_start(pdev, bar);
if (mmio_pa == 0U) {
err = -EIO;
goto err_free_aq_hw;
goto err_free_aq_hw_priv;
}
reg_sz = pci_resource_len(pdev, bar);
if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
err = -EIO;
goto err_free_aq_hw;
goto err_free_aq_hw_priv;
}
self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
goto err_free_aq_hw;
goto err_free_aq_hw_priv;
}
break;
}
......@@ -265,7 +287,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
if (bar == 4) {
err = -EIO;
goto err_free_aq_hw;
goto err_free_aq_hw_priv;
}
numvecs = min((u8)AQ_CFG_VECS_DEF,
......@@ -305,6 +327,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
aq_pci_free_irq_vectors(self);
err_hwinit:
iounmap(self->aq_hw->mmio);
err_free_aq_hw_priv:
kfree(self->aq_hw->priv);
err_free_aq_hw:
kfree(self->aq_hw);
err_ioremap:
......@@ -332,6 +356,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
aq_nic_free_vectors(self);
aq_pci_free_irq_vectors(self);
iounmap(self->aq_hw->mmio);
kfree(self->aq_hw->priv);
kfree(self->aq_hw);
pci_release_regions(pdev);
free_netdev(self->ndev);
......
......@@ -267,7 +267,7 @@ static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
0x00010000U : 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
......@@ -886,6 +886,8 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
}
const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_soft_reset = hw_atl_utils_soft_reset,
.hw_prepare = hw_atl_utils_initfw,
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_init = hw_atl_a0_hw_init,
.hw_reset = hw_atl_a0_hw_reset,
......
......@@ -187,8 +187,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int addr = 0U;
......@@ -215,8 +215,8 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
return err;
}
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
u8 *indirection_table = rss_params->indirection_table;
......@@ -251,9 +251,10 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
return err;
}
static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg)
int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg)
{
u64 rxcsum = !!(aq_nic_cfg->features & NETIF_F_RXCSUM);
unsigned int i;
/* TX checksums offloads*/
......@@ -261,10 +262,8 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
/* RX checksums offloads*/
hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
NETIF_F_RXCSUM));
hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
NETIF_F_RXCSUM));
hw_atl_rpo_ipv4header_crc_offload_en_set(self, rxcsum);
hw_atl_rpo_tcp_udp_crc_offload_en_set(self, rxcsum);
/* LSO offloads*/
hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
......@@ -272,7 +271,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
/* Outer VLAN tag offload */
hw_atl_rpo_outer_vlan_tag_mode_set(self, 1U);
/* LRO offloads */
/* LRO offloads */
{
unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
......@@ -314,7 +313,7 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
/* Tx TC/Queue number config */
hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
......@@ -324,7 +323,7 @@ static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
aq_hw_write_reg(self, 0x00007040U, ATL_HW_IS_CHIP_FEATURE(self, TPO2) ?
0x00010000U : 0x00000000U);
hw_atl_tdm_tx_dca_en_set(self, 0U);
hw_atl_tdm_tx_dca_mode_set(self, 0U);
......@@ -372,8 +371,8 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00005040U,
IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
aq_hw_write_reg(self, 0x00005040U, ATL_HW_IS_CHIP_FEATURE(self, RPF2) ?
0x000F0000U : 0x00000000U);
hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
......@@ -384,7 +383,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
......@@ -479,23 +478,21 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
return err;
}
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
hw_atl_tpb_tx_buff_en_set(self, 1);
hw_atl_rpb_rx_buff_en_set(self, 1);
......@@ -511,9 +508,8 @@ static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
return 0;
}
static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_s *ring,
unsigned int frags)
int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
unsigned int frags)
{
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
......@@ -600,9 +596,8 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 vlan_rx_stripping = self->aq_nic_cfg->is_vlan_rx_strip;
......@@ -643,9 +638,8 @@ static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
......@@ -673,9 +667,8 @@ static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
struct aq_ring_s *ring,
unsigned int sw_tail_old)
int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
unsigned int sw_tail_old)
{
for (; sw_tail_old != ring->sw_tail;
sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
......@@ -734,8 +727,8 @@ static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
unsigned int hw_head_;
int err = 0;
......@@ -753,8 +746,7 @@ static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
return err;
}
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
{
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
......@@ -854,14 +846,14 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask));
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));
......@@ -871,7 +863,7 @@ static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
*mask = hw_atl_itr_irq_statuslsw_get(self);
......@@ -880,8 +872,8 @@ static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int i = 0U;
......@@ -1071,16 +1063,14 @@ static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
return err;
}
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
{
hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
......@@ -1089,7 +1079,7 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
*tc_mode = hw_atl_rpb_tps_tx_tc_mode_get(self);
*tc_mode = hw_atl_tpb_tps_tx_tc_mode_get(self);
return aq_hw_err_from_flags(self);
}
......@@ -1478,6 +1468,8 @@ static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
}
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_soft_reset = hw_atl_utils_soft_reset,
.hw_prepare = hw_atl_utils_initfw,
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
.hw_reset = hw_atl_b0_hw_reset,
......
......@@ -33,4 +33,41 @@ extern const struct aq_hw_ops hw_atl_ops_b0;
#define hw_atl_ops_b1 hw_atl_ops_b0
int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self, struct aq_ring_s *ring);
int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param);
int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self, struct aq_ring_s *ring,
unsigned int sw_tail_old);
int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring);
int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param);
int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, struct aq_ring_s *ring,
unsigned int frags);
int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
struct aq_ring_s *ring);
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
int hw_atl_b0_hw_start(struct aq_hw_s *self);
int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask);
int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask);
int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter);
#endif /* HW_ATL_B0_H */
......@@ -693,6 +693,13 @@ void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
HW_ATL_RPFL2MC_ENF_SHIFT, l2multicast_flr_en);
}
u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPFL2PROMIS_MODE_ADR,
HW_ATL_RPFL2PROMIS_MODE_MSK,
HW_ATL_RPFL2PROMIS_MODE_SHIFT);
}
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en)
{
......@@ -867,6 +874,13 @@ void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
vlan_prom_mode_en);
}
u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_RPF_VL_PROMIS_MODE_ADR,
HW_ATL_RPF_VL_PROMIS_MODE_MSK,
HW_ATL_RPF_VL_PROMIS_MODE_SHIFT);
}
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_acc_untagged_packets)
{
......@@ -1304,14 +1318,14 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
}
u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
HW_ATL_TPB_TX_TC_MODE_MSK,
HW_ATL_TPB_TX_TC_MODE_SHIFT);
}
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
......
......@@ -349,6 +349,9 @@ void hw_atl_rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw,
u32 l2multicast_flr_en,
u32 filter);
/* get l2 promiscuous mode enable */
u32 hw_atl_rpfl2promiscuous_mode_en_get(struct aq_hw_s *aq_hw);
/* set l2 promiscuous mode enable */
void hw_atl_rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en);
......@@ -420,6 +423,9 @@ void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw,
u32 vlan_prom_mode_en);
/* Get VLAN promiscuous mode enable */
u32 hw_atl_rpf_vlan_prom_mode_en_get(struct aq_hw_s *aq_hw);
/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw,
u32 vlan_untagged_act);
......@@ -610,11 +616,11 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
/* tpb */
/* set TX Traffic Class Mode */
void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
void hw_atl_tpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
u32 tx_traf_class_mode);
/* get TX Traffic Class Mode */
u32 hw_atl_rpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
u32 hw_atl_tpb_tps_tx_tc_mode_get(struct aq_hw_s *aq_hw);
/* set tx buffer enable */
void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
......
......@@ -53,7 +53,6 @@ enum mcp_area {
MCP_AREA_SETTINGS = 0x20000000,
};
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
......@@ -67,14 +66,10 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
int err = 0;
err = hw_atl_utils_soft_reset(self);
if (err)
return err;
hw_atl_utils_hw_chip_features_init(self,
&self->chip_features);
hw_atl_utils_get_fw_version(self, &self->fw_ver_actual);
self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
self->fw_ver_actual) == 0) {
......@@ -313,7 +308,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
for (++cnt; --cnt && !err;) {
aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
if (IS_CHIP_FEATURE(REVISION_B1))
if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
self, val, val != a,
1U, 1000U);
......@@ -409,7 +404,7 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
if (err < 0)
goto err_exit;
if (IS_CHIP_FEATURE(REVISION_B1))
if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
else
err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);
......@@ -438,7 +433,7 @@ int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
p, cnt, MCP_AREA_SETTINGS);
}
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
const u32 dw_major_mask = 0xff000000U;
const u32 dw_minor_mask = 0x00ffffffU;
......@@ -501,7 +496,7 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
struct aq_hw_atl_utils_fw_rpc_tid_s sw;
int err = 0;
if (!IS_CHIP_FEATURE(MIPS)) {
if (!ATL_HW_IS_CHIP_FEATURE(self, MIPS)) {
err = -1;
goto err_exit;
}
......@@ -607,7 +602,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
if (IS_CHIP_FEATURE(REVISION_A0)) {
if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_A0)) {
unsigned int mtu = self->aq_nic_cfg ?
self->aq_nic_cfg->mtu : 1514U;
pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
......@@ -806,22 +801,24 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
u32 mif_rev = val & 0xFFU;
u32 chip_features = 0U;
chip_features |= ATL_HW_CHIP_ATLANTIC;
if ((0xFU & mif_rev) == 1U) {
chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
HAL_ATLANTIC_UTILS_CHIP_MIPS;
chip_features |= ATL_HW_CHIP_REVISION_A0 |
ATL_HW_CHIP_MPI_AQ |
ATL_HW_CHIP_MIPS;
} else if ((0xFU & mif_rev) == 2U) {
chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
HAL_ATLANTIC_UTILS_CHIP_MIPS |
HAL_ATLANTIC_UTILS_CHIP_TPO2 |
HAL_ATLANTIC_UTILS_CHIP_RPF2;
chip_features |= ATL_HW_CHIP_REVISION_B0 |
ATL_HW_CHIP_MPI_AQ |
ATL_HW_CHIP_MIPS |
ATL_HW_CHIP_TPO2 |
ATL_HW_CHIP_RPF2;
} else if ((0xFU & mif_rev) == 0xAU) {
chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
HAL_ATLANTIC_UTILS_CHIP_MIPS |
HAL_ATLANTIC_UTILS_CHIP_TPO2 |
HAL_ATLANTIC_UTILS_CHIP_RPF2;
chip_features |= ATL_HW_CHIP_REVISION_B1 |
ATL_HW_CHIP_MPI_AQ |
ATL_HW_CHIP_MIPS |
ATL_HW_CHIP_TPO2 |
ATL_HW_CHIP_RPF2;
}
*p = chip_features;
......@@ -919,11 +916,9 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
return 0;
}
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
{
*fw_version = aq_hw_read_reg(self, 0x18U);
return 0;
return aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
}
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
......
......@@ -360,6 +360,8 @@ struct aq_rx_filter_vlan {
u8 queue;
};
#define HW_ATL_VLAN_MAX_FILTERS 16U
struct aq_rx_filter_l2 {
s8 queue;
u8 location;
......@@ -406,17 +408,6 @@ enum hw_atl_rx_ctrl_registers_l3l4 {
#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
((location) - AQ_RX_FIRST_LOC_FL3L4)
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
self->chip_features)
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
MPI_RESET = 1,
......@@ -622,7 +613,7 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self);
int hw_atl_utils_update_stats(struct aq_hw_s *self);
......@@ -643,6 +634,8 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
struct hw_atl_utils_fw_rpc **rpc);
int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
extern const struct aq_fw_ops aq_fw_1x_ops;
extern const struct aq_fw_ops aq_fw_2x_ops;
......
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ring.h"
#include "aq_nic.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_utils.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl2_utils.h"
#include "hw_atl2_llh.h"
#include "hw_atl2_internal.h"
#include "hw_atl2_llh_internal.h"
static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
u32 tag, u32 mask, u32 action);
/* Capability fields common to all A2 (hw_atl2) board variants.
 * Board-specific tables (e.g. hw_atl2_caps_aqc113 below) expand this macro
 * and then add per-board fields such as media_type and link_speed_msk.
 * priv_data_len requests a per-device hw_atl2_priv allocation, stored in
 * aq_hw_s::priv by the PCI probe path.
 */
#define DEFAULT_BOARD_BASIC_CAPABILITIES \
.is_64_dma = true, \
.msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL2_RSS_MAX, \
.tcs = HW_ATL2_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL2_RXD_SIZE, \
.rxds_max = HW_ATL2_MAX_RXD, \
.rxds_min = HW_ATL2_MIN_RXD, \
.txd_alignment = 1U, \
.txd_size = HW_ATL2_TXD_SIZE, \
.txds_max = HW_ATL2_MAX_TXD, \
.txds_min = HW_ATL2_MIN_TXD, \
.txhwb_alignment = 4096U, \
.tx_rings = HW_ATL2_TX_RINGS, \
.rx_rings = HW_ATL2_RX_RINGS, \
.hw_features = NETIF_F_HW_CSUM | \
NETIF_F_RXCSUM | \
NETIF_F_RXHASH | \
NETIF_F_SG | \
NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_LRO | \
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_GSO_UDP_L4 | \
NETIF_F_GSO_PARTIAL, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL2_MTU_JUMBO, \
.mac_regs_count = 72, \
.hw_alive_check_addr = 0x10U, \
.priv_data_len = sizeof(struct hw_atl2_priv)
/* Capability table for the AQC113 family of A2 NICs: twisted-pair media,
 * link rates from 10M up to 10G (note: no 5GSR bit advertised here).
 * Referenced by the PCI board table for all A2 device IDs.
 */
const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
DEFAULT_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_TP,
.link_speed_msk = AQ_NIC_RATE_10G |
AQ_NIC_RATE_5G |
AQ_NIC_RATE_2GS |
AQ_NIC_RATE_1G |
AQ_NIC_RATE_100M |
AQ_NIC_RATE_10M,
};
/* Read the HW semaphore that guards the action resolver table (ART). */
static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
{
	return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR);
}
/* Soft-reset the device and drop all chip-specific driver state.
 * Returns 0 on success or a negative error code from the reset path.
 */
static int hw_atl2_hw_reset(struct aq_hw_s *self)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	int err;

	err = hw_atl2_utils_soft_reset(self);
	if (err)
		return err;

	/* Private state (e.g. ART base, cached stats) is stale after reset */
	memset(priv, 0, sizeof(*priv));

	self->aq_fw_ops->set_state(self, MPI_RESET);

	err = aq_hw_err_from_flags(self);

	return err;
}
/* Program the RX queue -> traffic class map registers.
 * Each nibble in the map registers assigns one RX queue to a TC; the two
 * patterns below correspond to the two RX traffic class modes of the chip.
 * NOTE(review): exact queue grouping per mode inferred from the register
 * patterns — confirm against the A2 datasheet.
 */
static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self)
{
	if (!hw_atl_rpb_rpf_rx_traf_class_mode_get(self)) {
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x11110000);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x33332222);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x55554444);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x77776666);
	} else {
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x00000000);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x11111111);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x22222222);
		aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x33333333);
	}

	return aq_hw_err_from_flags(self);
}
/* Configure QoS: TX packet scheduler credits/weights, TX/RX packet buffer
 * sizes with flow-control watermarks, 802.1p priority to TC mapping and
 * the legacy ring-to-TC map.  Only TC0 is set up (HW_ATL2_TC_MAX == 1).
 */
static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	u32 tx_buff_size = HW_ATL2_TXBUF_MAX;
	u32 rx_buff_size = HW_ATL2_RXBUF_MAX;
	unsigned int prio = 0U;
	u32 threshold = 0U;
	u32 tc = 0U;

	/* TPS Descriptor rate init */
	hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	tc = 0;

	/* TX Packet Scheduler Data TC0 */
	hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF0, tc);
	hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, 0x640, tc);
	hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
	hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);

	/* Tx buf size TC0; watermarks at 66% (high) and 50% (low) */
	hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
	threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
	hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);

	threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
	hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);

	/* QoS Rx buf size per TC; same 66%/50% watermark scheme */
	hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
	threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
	hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);

	threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
	hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);

	/* QoS 802.1p priority -> TC mapping (priorities spread evenly) */
	for (prio = 0; prio < 8; ++prio)
		hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
							cfg->tcs * prio / 8);

	/* ATL2 Apply legacy ring to TC mapping */
	hw_atl2_hw_queue_to_tc_map_set(self);

	return aq_hw_err_from_flags(self);
}
/* Program the A2 RSS redirection table from the supplied indirection
 * table, then apply the common (A1) RSS configuration on top.
 */
static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
			      struct aq_rss_parameters *rss_params)
{
	const u8 *redir = rss_params->indirection_table;
	int idx = HW_ATL2_RSS_REDIRECTION_MAX;

	/* Walk the table from the last entry down to entry 0 */
	while (idx > 0) {
		idx--;
		hw_atl2_new_rpf_rss_redir_set(self, 0, idx, redir[idx]);
	}

	return hw_atl_b0_hw_rss_set(self, rss_params);
}
/* One-time TX datapath init: TC mode, LSO TCP flag masks, write-back
 * interrupts, DCA off, and A2-specific TX buffer clock gating disabled.
 */
static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
{
	/* Tx TC/RSS number config */
	hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);

	/* TCP flags preserved in first/middle/last LSO segments */
	hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	hw_atl_tdm_tx_dca_en_set(self, 0U);
	hw_atl_tdm_tx_dca_mode_set(self, 0U);

	hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);
	hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U);

	return aq_hw_err_from_flags(self);
}
/* Populate the default action resolver table (ART) records.
 * The first two records drop traffic that matched no filter (L2 and VLAN
 * promisc-off behavior); the next ones steer VLAN/MAC/all-multicast/
 * untagged matches to TC0; the last two are disabled placeholders that
 * get activated when the corresponding promiscuous mode is turned on.
 * All indices are relative to the firmware-assigned priv->art_base_index.
 */
static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u8 index;

	hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF);
	hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC,
				     HW_ATL2_MAC_UC);
	hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC);

	/* Drop packets matching neither a UC filter nor all-multicast */
	index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_UC_MASK |
				    HW_ATL2_RPF_TAG_ALLMC_MASK,
				    HW_ATL2_ACTION_DROP);

	/* Drop tagged packets matching no VLAN filter (and not untagged) */
	index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_VLAN_MASK |
				    HW_ATL2_RPF_TAG_UNTAG_MASK,
				    HW_ATL2_ACTION_DROP);

	index = priv->art_base_index + HW_ATL2_RPF_VLAN_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_VLAN,
				    HW_ATL2_RPF_TAG_VLAN_MASK,
				    HW_ATL2_ACTION_ASSIGN_TC(0));

	index = priv->art_base_index + HW_ATL2_RPF_MAC_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_UC,
				    HW_ATL2_RPF_TAG_UC_MASK,
				    HW_ATL2_ACTION_ASSIGN_TC(0));

	index = priv->art_base_index + HW_ATL2_RPF_ALLMC_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_ALLMC,
				    HW_ATL2_RPF_TAG_ALLMC_MASK,
				    HW_ATL2_ACTION_ASSIGN_TC(0));

	index = priv->art_base_index + HW_ATL2_RPF_UNTAG_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_UNTAG_MASK,
				    HW_ATL2_RPF_TAG_UNTAG_MASK,
				    HW_ATL2_ACTION_ASSIGN_TC(0));

	/* Disabled by default; enabled via the promisc helpers below */
	index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_VLAN_MASK,
				    HW_ATL2_ACTION_DISABLE);

	index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_ON_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_UC_MASK,
				    HW_ATL2_ACTION_DISABLE);
}
/* Toggle VLAN promiscuous behavior via the ART.
 * VLAN traffic is only dropped (promisc off) when neither VLAN promisc
 * is requested nor L2 promiscuous mode is currently enabled in HW.
 */
static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
						  bool promisc)
{
	u16 off_action = (!promisc &&
			  !hw_atl_rpfl2promiscuous_mode_en_get(self)) ?
				HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE;
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u8 index;

	index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_VLAN_MASK |
				    HW_ATL2_RPF_TAG_UNTAG_MASK, off_action);
}
/* Toggle L2 promiscuous behavior via the ART: when promisc is on, the
 * "drop unmatched" record is disabled so everything is accepted.
 * VLAN promisc state is refreshed as well, since its off-action depends
 * on the L2 promisc state.
 */
static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc)
{
	u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP;
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	bool vlan_promisc_enable;
	u8 index;

	index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
	hw_atl2_act_rslvr_table_set(self, index, 0,
				    HW_ATL2_RPF_TAG_UC_MASK |
				    HW_ATL2_RPF_TAG_ALLMC_MASK,
				    off_action);

	/* turn VLAN promisc mode too */
	vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self);
	hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc |
					      vlan_promisc_enable);
}
/* Write one action resolver table record under the HW semaphore.
 * Spins (atomically, up to 10 ms) until the semaphore reads 1, writes the
 * record, then releases the semaphore by writing 1 back.
 * Returns 0 on success or -ETIMEDOUT if the semaphore was never acquired.
 */
static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
				       u32 tag, u32 mask, u32 action)
{
	u32 val;
	int err;

	err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get,
					self, val, val == 1,
					1, 10000U);
	if (err)
		return err;

	hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask,
					 action);

	hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR);

	return err;
}
/* One-time RX datapath init: TC mode, flow control, RSS hash config,
 * unicast/multicast/VLAN filter defaults and the new-style ART filters.
 */
static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

	hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL);

	/* RSS Ring selection */
	hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					  HW_ATL_RSS_ENABLED_3INDEX_BITS :
					  HW_ATL_RSS_DISABLED);

	/* Multicast filters: only filter 0 enabled initially */
	for (i = HW_ATL2_MAC_MAX; i--;) {
		hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
	}

	hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U);

	/* Vlan filters */
	hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD);
	hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q);
	hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

	/* Always accept untagged packets */
	hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
	hw_atl_rpf_vlan_untagged_act_set(self, 1U);

	/* A2-specific filtering via the action resolver table */
	hw_atl2_hw_init_new_rx_filters(self);

	/* Rx Interrupts */
	hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
	hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	hw_atl_rdm_rx_dca_en_set(self, 0U);
	hw_atl_rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}
/* Full device initialization: query FW for the ART region, set up TX/RX
 * paths, MAC address, QoS, RSS, interrupts and offloads.
 * @mac_addr: MAC address to program into the primary unicast filter.
 * Returns 0 on success or a negative error code.
 */
static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
	/* Interrupt global-control values indexed by [irq_type][multi-vector] */
	static u32 aq_hw_atl2_igcr_table_[4][2] = {
		[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
		[AQ_HW_IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
		[AQ_HW_IRQ_MSI]     = { 0x20000021U, 0x20000025U },
		[AQ_HW_IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
	};

	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
	u8 base_index, count;
	int err;

	err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index,
							  &count);
	if (err)
		return err;

	/* FW reports the ART base in units of 8-record sections */
	priv->art_base_index = 8 * base_index;

	hw_atl2_init_launchtime(self);

	hw_atl2_hw_init_tx_path(self);
	hw_atl2_hw_init_rx_path(self);

	hw_atl_b0_hw_mac_addr_set(self, mac_addr);

	self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
	self->aq_fw_ops->set_state(self, MPI_INIT);

	hw_atl2_hw_qos_set(self);
	hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	/* Switch the receive path to the new (A2) filtering scheme */
	hw_atl2_rpf_new_enable_set(self, 1);

	/* Reset link status and read out initial hardware counters */
	self->aq_link_status.mbps = 0;
	self->aq_fw_ops->update_stats(self);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	hw_atl_reg_irq_glb_ctl_set(self,
				   aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type]
						 [(aq_nic_cfg->vecs > 1U) ?
						  1 : 0]);

	hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Interrupts: map fatal-error cause to its vector */
	hw_atl_reg_gen_irq_map_set(self,
				   ((HW_ATL2_ERR_INT << 0x18) |
				    (1U << 0x1F)) |
				   ((HW_ATL2_ERR_INT << 0x10) |
				    (1U << 0x17)), 0U);

	hw_atl_b0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}
/* RX ring init is identical to A1; delegate to the b0 implementation. */
static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self,
				   struct aq_ring_s *aq_ring,
				   struct aq_ring_param_s *aq_ring_param)
{
	return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param);
}
/* TX ring init is identical to A1; delegate to the b0 implementation. */
static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self,
				   struct aq_ring_s *aq_ring,
				   struct aq_ring_param_s *aq_ring_param)
{
	return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param);
}
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

/* Apply the netdev packet filter flags: update the A2 ART promisc records
 * first, then reuse the A1 path for the remaining filter bits.
 */
static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self,
					unsigned int packet_filter)
{
	hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC));

	return hw_atl_b0_hw_packet_filter_set(self, packet_filter);
}

#undef IS_FILTER_ENABLED
/* Program the multicast address list into the unicast filter bank,
 * starting at slot HW_ATL2_MAC_MIN (slot 0 is the primary MAC).
 * Returns -EBADRQC if @count exceeds the available filter slots.
 */
static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self,
					 u8 ar_mac
					 [AQ_HW_MULTICAST_ADDRESS_MAX]
					 [ETH_ALEN],
					 u32 count)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int err = 0;

	if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (cfg->mc_list_count = 0U;
	     cfg->mc_list_count < count;
	     ++cfg->mc_list_count) {
		u32 i = cfg->mc_list_count;
		/* Split the 6-byte MAC into MSW (2 bytes) / LSW (4 bytes) */
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		/* Disable the filter while rewriting it */
		hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addresslsw_set(self, l,
							HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2unicast_dest_addressmsw_set(self, h,
							HW_ATL2_MAC_MIN + i);

		hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i);

		hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled),
					   HW_ATL2_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
/* Configure TX/RX interrupt moderation for all rings.
 * Three modes from aq_nic_cfg->itr:
 *  - OFF: moderation disabled, immediate write-back interrupts;
 *  - ON: user-supplied tx_itr/rx_itr (in us) clamped to HW limits;
 *  - AUTO: timer values picked from per-link-speed tables.
 * The same moderation control word is written to every ring.
 */
static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
	unsigned int i = 0U;
	u32 itr_tx = 2U;
	u32 itr_rx = 2U;

	switch (self->aq_nic_cfg->itr) {
	case AQ_CFG_INTERRUPT_MODERATION_ON:
	case AQ_CFG_INTERRUPT_MODERATION_AUTO:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 1U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 1U);

		if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
			/* HW timers are in 2us units */
			int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
			int tx_min_timer = tx_max_timer / 2;

			int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
			int rx_min_timer = rx_max_timer / 2;

			/* Clamp user values to the HW timer ranges */
			tx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
					   tx_max_timer);
			tx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
					   tx_min_timer);
			rx_max_timer = min(HW_ATL2_INTR_MODER_MAX,
					   rx_max_timer);
			rx_min_timer = min(HW_ATL2_INTR_MODER_MIN,
					   rx_min_timer);

			/* min timer at bits [15:8], max timer at [24:16] */
			itr_tx |= tx_min_timer << 0x8U;
			itr_tx |= tx_max_timer << 0x10U;
			itr_rx |= rx_min_timer << 0x8U;
			itr_rx |= rx_max_timer << 0x10U;
		} else {
			/* AUTO mode: {min, max} timer pairs per link speed */
			static unsigned int hw_atl2_timers_table_tx_[][2] = {
				{0xfU, 0xffU}, /* 10Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit */
				{0xfU, 0x1ffU}, /* 5Gbit 5GS */
				{0xfU, 0x1ffU}, /* 2.5Gbit */
				{0xfU, 0x1ffU}, /* 1Gbit */
				{0xfU, 0x1ffU}, /* 100Mbit */
			};
			static unsigned int hw_atl2_timers_table_rx_[][2] = {
				{0x6U, 0x38U},/* 10Gbit */
				{0xCU, 0x70U},/* 5Gbit */
				{0xCU, 0x70U},/* 5Gbit 5GS */
				{0x18U, 0xE0U},/* 2.5Gbit */
				{0x30U, 0x80U},/* 1Gbit */
				{0x4U, 0x50U},/* 100Mbit */
			};
			unsigned int mbps = self->aq_link_status.mbps;
			unsigned int speed_index;

			speed_index = hw_atl_utils_mbps_2_speed_index(mbps);

			/* Update user visible ITR settings */
			self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_
							[speed_index][1] * 2;
			self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_
							[speed_index][1] * 2;

			itr_tx |= hw_atl2_timers_table_tx_
						[speed_index][0] << 0x8U;
			itr_tx |= hw_atl2_timers_table_tx_
						[speed_index][1] << 0x10U;

			itr_rx |= hw_atl2_timers_table_rx_
						[speed_index][0] << 0x8U;
			itr_rx |= hw_atl2_timers_table_rx_
						[speed_index][1] << 0x10U;
		}
		break;
	case AQ_CFG_INTERRUPT_MODERATION_OFF:
		hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_tdm_tdm_intr_moder_en_set(self, 0U);
		hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		hw_atl_rdm_rdm_intr_moder_en_set(self, 0U);
		itr_tx = 0U;
		itr_rx = 0U;
		break;
	}

	/* Apply the computed control word to every ring */
	for (i = HW_ATL2_RINGS_MAX; i--;) {
		hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
		hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
	}

	return aq_hw_err_from_flags(self);
}
/* Stop the datapath by masking all device interrupts. */
static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
	hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);

	return 0;
}
/* Return the statistics snapshot maintained by the FW stats updater. */
static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
{
	return &self->curr_stats;
}
/* Program the HW VLAN filter table plus the matching ART records.
 * The legacy VLAN promisc bit stays enabled because actual dropping of
 * unmatched VLANs is performed by the action resolver.  An enabled
 * filter with a specific queue gets tag (i + 2) and an ART record that
 * steers it; a filter without a queue (0xFF) only gets the default tag 1.
 */
static int hw_atl2_hw_vlan_set(struct aq_hw_s *self,
			       struct aq_rx_filter_vlan *aq_vlans)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	u32 queue;
	u8 index;
	int i;

	hw_atl_rpf_vlan_prom_mode_en_set(self, 1U);

	for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) {
		queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue);

		/* Disable the slot and its ART record before rewriting */
		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
		index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i;
		hw_atl2_act_rslvr_table_set(self, index, 0, 0,
					    HW_ATL2_ACTION_DISABLE);
		if (aq_vlans[i].enable) {
			hw_atl_rpf_vlan_id_flr_set(self,
						   aq_vlans[i].vlan_id,
						   i);
			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);

			if (aq_vlans[i].queue != 0xFF) {
				hw_atl_rpf_vlan_rxq_flr_set(self,
							    aq_vlans[i].queue,
							    i);
				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);

				hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i);

				index = priv->art_base_index +
					HW_ATL2_RPF_VLAN_USER_INDEX + i;
				hw_atl2_act_rslvr_table_set(self, index,
					(i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET,
					HW_ATL2_RPF_TAG_VLAN_MASK, queue);
			} else {
				hw_atl2_rpf_vlan_flr_tag_set(self, 1, i);
			}
		}
	}

	return aq_hw_err_from_flags(self);
}
/* Enable/disable VLAN filtering as a whole. */
static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
{
	/* set promisc in case of disabling the vlan filter */
	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
	hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable);

	return aq_hw_err_from_flags(self);
}
/* A2 hardware operations table.  Datapath helpers that are unchanged
 * from A1 are reused from hw_atl_b0; only A2-specific flows (init,
 * reset, filtering, moderation, stats/fw access) are overridden.
 */
const struct aq_hw_ops hw_atl2_ops = {
	.hw_soft_reset        = hw_atl2_utils_soft_reset,
	.hw_prepare           = hw_atl2_utils_initfw,
	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
	.hw_init              = hw_atl2_hw_init,
	.hw_reset             = hw_atl2_hw_reset,
	.hw_start             = hw_atl_b0_hw_start,
	.hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop              = hw_atl2_hw_stop,

	.hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive   = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill      = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable        = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable       = hw_atl_b0_hw_irq_disable,
	.hw_irq_read          = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init             = hw_atl2_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl2_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl2_hw_packet_filter_set,
	.hw_filter_vlan_set          = hw_atl2_hw_vlan_set,
	.hw_filter_vlan_ctrl         = hw_atl2_hw_vlan_ctrl,
	.hw_multicast_list_set       = hw_atl2_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl2_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
	.hw_get_hw_stats             = hw_atl2_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl2_utils_get_fw_version,
	.hw_set_offload              = hw_atl_b0_hw_offload_set,
};
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef HW_ATL2_H
#define HW_ATL2_H
#include "aq_common.h"
extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
extern const struct aq_hw_ops hw_atl2_ops;
#endif /* HW_ATL2_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef HW_ATL2_INTERNAL_H
#define HW_ATL2_INTERNAL_H
#include "hw_atl2_utils.h"
/* MTU / ring geometry limits */
#define HW_ATL2_MTU_JUMBO 16352U
#define HW_ATL2_MTU 1514U

#define HW_ATL2_TX_RINGS 4U
#define HW_ATL2_RX_RINGS 4U

#define HW_ATL2_RINGS_MAX 32U
#define HW_ATL2_TXD_SIZE (16U)
#define HW_ATL2_RXD_SIZE (16U)

/* Unicast filter slots: slot 0 is the primary MAC, the rest hold the
 * multicast list (see hw_atl2_hw_multicast_list_set).
 */
#define HW_ATL2_MAC_UC   0U
#define HW_ATL2_MAC_MIN  1U
#define HW_ATL2_MAC_MAX  38U

/* interrupts */
#define HW_ATL2_ERR_INT 8U
#define HW_ATL2_INT_MASK (0xFFFFFFFFU)

/* Packet buffer sizes (per TC), in the units used by the threshold
 * calculations in hw_atl2_hw_qos_set.
 */
#define HW_ATL2_TXBUF_MAX 128U
#define HW_ATL2_RXBUF_MAX 192U

#define HW_ATL2_RSS_REDIRECTION_MAX 64U

#define HW_ATL2_TC_MAX 1U
#define HW_ATL2_RSS_MAX 8U

/* Interrupt moderation timer clamp values (HW 2us units) */
#define HW_ATL2_INTR_MODER_MAX 0x1FF
#define HW_ATL2_INTR_MODER_MIN 0xFF

#define HW_ATL2_MIN_RXD \
	(ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE))
#define HW_ATL2_MIN_TXD \
	(ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE))

#define HW_ATL2_MAX_RXD 8184U
#define HW_ATL2_MAX_TXD 8184U

/* HW semaphore index guarding the action resolver table */
#define HW_ATL2_FW_SM_ACT_RSLVR  0x3U

/* Bit offsets of the per-filter tag fields inside the RPF tag word */
#define HW_ATL2_RPF_TAG_UC_OFFSET      0x0
#define HW_ATL2_RPF_TAG_ALLMC_OFFSET   0x6
#define HW_ATL2_RPF_TAG_ET_OFFSET      0x7
#define HW_ATL2_RPF_TAG_VLAN_OFFSET    0xA
#define HW_ATL2_RPF_TAG_UNTAG_OFFSET   0xE
#define HW_ATL2_RPF_TAG_L3_V4_OFFSET   0xF
#define HW_ATL2_RPF_TAG_L3_V6_OFFSET   0x12
#define HW_ATL2_RPF_TAG_L4_OFFSET      0x15
#define HW_ATL2_RPF_TAG_L4_FLEX_OFFSET 0x18
#define HW_ATL2_RPF_TAG_FLEX_OFFSET    0x1B
#define HW_ATL2_RPF_TAG_PCP_OFFSET     0x1D

/* Field masks derived from the offsets above */
#define HW_ATL2_RPF_TAG_UC_MASK    (0x0000003F << HW_ATL2_RPF_TAG_UC_OFFSET)
#define HW_ATL2_RPF_TAG_ALLMC_MASK (0x00000001 << HW_ATL2_RPF_TAG_ALLMC_OFFSET)
#define HW_ATL2_RPF_TAG_UNTAG_MASK (0x00000001 << HW_ATL2_RPF_TAG_UNTAG_OFFSET)
#define HW_ATL2_RPF_TAG_VLAN_MASK  (0x0000000F << HW_ATL2_RPF_TAG_VLAN_OFFSET)
#define HW_ATL2_RPF_TAG_ET_MASK    (0x00000007 << HW_ATL2_RPF_TAG_ET_OFFSET)
#define HW_ATL2_RPF_TAG_L3_V4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V4_OFFSET)
#define HW_ATL2_RPF_TAG_L3_V6_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V6_OFFSET)
#define HW_ATL2_RPF_TAG_L4_MASK    (0x00000007 << HW_ATL2_RPF_TAG_L4_OFFSET)
#define HW_ATL2_RPF_TAG_PCP_MASK   (0x00000007 << HW_ATL2_RPF_TAG_PCP_OFFSET)

/* Default (base) tag values used when programming filters */
#define HW_ATL2_RPF_TAG_BASE_UC    BIT(HW_ATL2_RPF_TAG_UC_OFFSET)
#define HW_ATL2_RPF_TAG_BASE_ALLMC BIT(HW_ATL2_RPF_TAG_ALLMC_OFFSET)
#define HW_ATL2_RPF_TAG_BASE_UNTAG BIT(HW_ATL2_RPF_TAG_UNTAG_OFFSET)
#define HW_ATL2_RPF_TAG_BASE_VLAN  BIT(HW_ATL2_RPF_TAG_VLAN_OFFSET)
/* Layout of records inside the action resolver table.  Each value is an
 * offset relative to the firmware-assigned base (priv->art_base_index).
 */
enum HW_ATL2_RPF_ART_INDEX {
	HW_ATL2_RPF_L2_PROMISC_OFF_INDEX,
	HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX,
	HW_ATL2_RPF_L3L4_USER_INDEX	= 8,
	HW_ATL2_RPF_ET_PCP_USER_INDEX	= HW_ATL2_RPF_L3L4_USER_INDEX + 16,
	HW_ATL2_RPF_VLAN_USER_INDEX	= HW_ATL2_RPF_ET_PCP_USER_INDEX + 16,
	HW_ATL2_RPF_PCP_TO_TC_INDEX	= HW_ATL2_RPF_VLAN_USER_INDEX +
					  HW_ATL_VLAN_MAX_FILTERS,
	HW_ATL2_RPF_VLAN_INDEX		= HW_ATL2_RPF_PCP_TO_TC_INDEX +
					  AQ_CFG_TCS_MAX,
	HW_ATL2_RPF_MAC_INDEX,
	HW_ATL2_RPF_ALLMC_INDEX,
	HW_ATL2_RPF_UNTAG_INDEX,
	HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX,
	HW_ATL2_RPF_L2_PROMISC_ON_INDEX,
};
/* Encode an action resolver record action word:
 * bits [9:8] action, bit 7 RSS flag, bits [7:2] index (queue or TC),
 * bit 0 valid.
 */
#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \
	((((ACTION) & 0x3U) << 8) | \
	(((RSS) & 0x1U) << 7) | \
	(((INDEX) & 0x3FU) << 2) | \
	(((VALID) & 0x1U) << 0))

#define HW_ATL2_ACTION_DROP HW_ATL2_ACTION(0, 0, 0, 1)
#define HW_ATL2_ACTION_DISABLE HW_ATL2_ACTION(0, 0, 0, 0)
#define HW_ATL2_ACTION_ASSIGN_QUEUE(QUEUE) HW_ATL2_ACTION(1, 0, (QUEUE), 1)
#define HW_ATL2_ACTION_ASSIGN_TC(TC) HW_ATL2_ACTION(1, 1, (TC), 1)
/* Bitmask of packet classes that participate in RSS hash computation
 * (written via hw_atl2_rpf_rss_hash_type_set).
 */
enum HW_ATL2_RPF_RSS_HASH_TYPE {
	HW_ATL2_RPF_RSS_HASH_TYPE_NONE = 0,
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 = BIT(0),
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP = BIT(1),
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP = BIT(2),
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 = BIT(3),
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP = BIT(4),
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP = BIT(5),
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX = BIT(6),
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP = BIT(7),
	HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP = BIT(8),
	HW_ATL2_RPF_RSS_HASH_TYPE_ALL = HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 |
					HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP |
					HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP |
					HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 |
					HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP |
					HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP |
					HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX |
					HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP |
					HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP,
};
#define HW_ATL_RSS_DISABLED 0x00000000U
#define HW_ATL_RSS_ENABLED_3INDEX_BITS 0xB3333333U

#define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU

/* A2-specific per-device private state (lives in aq_hw_s::priv). */
struct hw_atl2_priv {
	struct statistics_s last_stats;
	/* First action resolver record assigned to this function by FW;
	 * derived in hw_atl2_hw_init (8 records per reported section).
	 */
	unsigned int art_base_index;
};
#endif /* HW_ATL2_INTERNAL_H */
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#include "hw_atl2_llh.h"
#include "hw_atl2_llh_internal.h"
#include "aq_hw_utils.h"
/* Select which packet classes feed the RSS hash (HW_ATL2_RPF_RSS_HASH_TYPE_*). */
void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR,
			    HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK,
			    HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT,
			    rss_hash_type);
}
/* rpf */
/* rpf */
/* Enable/disable the new (A2) receive packet filter pipeline. */
void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_NEW_EN_ADR,
			    HW_ATL2_RPF_NEW_EN_MSK,
			    HW_ATL2_RPF_NEW_EN_SHIFT,
			    enable);
}
/* Set the request tag emitted by unicast L2 filter @filter on match. */
void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPFL2UC_TAG_ADR(filter),
			    HW_ATL2_RPFL2UC_TAG_MSK,
			    HW_ATL2_RPFL2UC_TAG_SHIFT,
			    tag);
}
/* Set the request tag emitted by the broadcast L2 filter on match. */
void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_L2_BC_TAG_ADR,
			    HW_ATL2_RPF_L2_BC_TAG_MSK,
			    HW_ATL2_RPF_L2_BC_TAG_SHIFT,
			    tag);
}
/* Write one entry of the A2 RSS redirection table for traffic class @tc. */
void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
				   u32 queue)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_RSS_REDIR_ADR(tc, index),
			    HW_ATL2_RPF_RSS_REDIR_MSK(tc),
			    HW_ATL2_RPF_RSS_REDIR_SHIFT(tc),
			    queue);
}
/* Set the request tag emitted by VLAN filter @filter on match. */
void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_VL_TAG_ADR(filter),
			    HW_ATL2_RPF_VL_TAG_MSK,
			    HW_ATL2_RPF_VL_TAG_SHIFT,
			    tag);
}
/* TX */
/* TX */
/* Enable/disable clock gating of the TX packet buffer. */
void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR,
			    HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK,
			    HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT,
			    clk_gate_en);
}
/* Write the TX interrupt moderation control word for ring @queue. */
void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
					u32 tx_intr_moderation_ctl,
					u32 queue)
{
	aq_hw_write_reg(aq_hw, HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue),
			tx_intr_moderation_ctl);
}
/* Set the TX packet scheduler data-arbiter max credit for TC @tc. */
void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
						    u32 max_credit,
						    u32 tc)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
			    HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK,
			    HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT,
			    max_credit);
}
/* Set the TX packet scheduler data-arbiter weight for TC @tc. */
void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
						u32 tx_pkt_shed_tc_data_weight,
						u32 tc)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc),
			    HW_ATL2_TPS_DATA_TCTWEIGHT_MSK,
			    HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT,
			    tx_pkt_shed_tc_data_weight);
}
/* Read the raw (FPGA) hardware version register. */
u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, HW_ATL2_FPGA_VER_ADR);
}
/* Program the launch-time clock ratio based on HW version:
 * versions below 1.0.0.0 run at full speed, 1.0.85.2 and newer at half
 * speed, anything in between at quarter speed.
 */
void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw)
{
	u32 hw_ver = hw_atl2_get_hw_version(aq_hw);

	aq_hw_write_reg_bit(aq_hw, HW_ATL2_LT_CTRL_ADR,
			    HW_ATL2_LT_CTRL_CLK_RATIO_MSK,
			    HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT,
			    hw_ver  < HW_ATL2_FPGA_VER_U32(1, 0, 0, 0) ?
			    HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED :
			    hw_ver >= HW_ATL2_FPGA_VER_U32(1, 0, 85, 2) ?
			    HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED :
			    HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED);
}
/* set action resolver record */
void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
u32 tag, u32 mask, u32 action)
{
aq_hw_write_reg(aq_hw,
HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(location),
tag);
aq_hw_write_reg(aq_hw,
HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(location),
mask);
aq_hw_write_reg(aq_hw,
HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(location),
action);
}
/* Enable the given bitmask of action resolver table sections. */
void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_REC_TAB_EN_ADR,
			    HW_ATL2_RPF_REC_TAB_EN_MSK,
			    HW_ATL2_RPF_REC_TAB_EN_SHIFT,
			    sections);
}
/* Copy @len dwords from the firmware shared *input* buffer, starting at
 * dword @offset, into @data.
 */
void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
				int len)
{
	int idx;

	for (idx = 0; idx < len; idx++)
		data[idx] = aq_hw_read_reg(aq_hw,
				HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(offset + idx));
}
/* Copy @len dwords from @data into the firmware shared *input* buffer,
 * starting at dword @offset.
 */
void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
				  int len)
{
	int idx;

	for (idx = 0; idx < len; idx++)
		aq_hw_write_reg(aq_hw,
				HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(offset + idx),
				data[idx]);
}
/* Copy @len dwords from the firmware shared *output* buffer, starting at
 * dword @offset, into @data.
 */
void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
				 int len)
{
	int idx;

	for (idx = 0; idx < len; idx++)
		data[idx] = aq_hw_read_reg(aq_hw,
				HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(offset + idx));
}
/* Signal to the MCP that the host finished writing the shared buffer. */
void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish)
{
	aq_hw_write_reg_bit(aq_hw, HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR,
			    HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK,
			    HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT,
			    finish);
}
/* Check whether the MCP finished reading the shared buffer. */
u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg_bit(aq_hw, HW_ATL2_MIF_MCP_FINISHED_READ_ADR,
				  HW_ATL2_MIF_MCP_FINISHED_READ_MSK,
				  HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT);
}
/* Read the MCP boot register. */
u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR);
}
/* Write the MCP boot register.
 * Fix: drop the 'return' keyword — "return <void expression>;" inside a
 * void function is an ISO C constraint violation (C11 6.8.6.4p1); it only
 * compiled as a GNU extension.
 */
void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val)
{
	aq_hw_write_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR, val);
}
/* Read the host interrupt request register. */
u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_ADR);
}
/* Clear the given host interrupt request bits.
 * Fix: drop the 'return' keyword — "return <void expression>;" inside a
 * void function is an ISO C constraint violation (C11 6.8.6.4p1); it only
 * compiled as a GNU extension.
 */
void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val)
{
	aq_hw_write_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR,
			val);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef HW_ATL2_LLH_H
#define HW_ATL2_LLH_H
#include <linux/types.h>
struct aq_hw_s;
/* Set TX Interrupt Moderation Control Register */
void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 tx_intr_moderation_ctl,
u32 queue);
/* Set RSS hash type */
void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type);
/* set new RPF enable */
void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable);
/* set l2 unicast filter tag */
void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
/* set l2 broadcast filter tag */
void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag);
/* set new rss redirection table */
void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
u32 queue);
/* Set VLAN filter tag */
void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
/* set tx buffer clock gate enable */
void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en);
/* set tx packet scheduler tc data max credit */
void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
/* set tx packet scheduler tc data weight */
void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc);
u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw);
void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw);
/* set action resolver record */
void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location,
u32 tag, u32 mask, u32 action);
/* set enable action resolver section */
void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections);
/* get data from firmware shared input buffer */
void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data,
int len);
/* set data into firmware shared input buffer */
void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data,
int len);
/* get data from firmware shared output buffer */
void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data,
int len);
/* set host finished write shared buffer indication */
void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish);
/* get mcp finished read shared buffer indication */
u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw);
/* get mcp boot register */
u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw);
/* set mcp boot register */
void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val);
/* get host interrupt request */
u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw);
/* clear host interrupt request */
void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val);
#endif /* HW_ATL2_LLH_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef HW_ATL2_LLH_INTERNAL_H
#define HW_ATL2_LLH_INTERNAL_H
/* RX pif_rpf_rss_hash_type_i bitfield definitions
 * 9-bit field selecting the packet classes hashed for RSS
 * (see enum HW_ATL2_RPF_RSS_HASH_TYPE).
 */
#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8
#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK 0x000001FF
#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSKN 0xFFFFFE00
#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT 0
#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_WIDTH 9

/* rx rpf_new_rpf_en bitfield definitions
 * preprocessor definitions for the bitfield "rpf_new_rpf_en_i".
 * port="pif_rpf_new_rpf_en_i"
 */

/* register address for bitfield rpf_new_rpf_en */
#define HW_ATL2_RPF_NEW_EN_ADR 0x00005104
/* bitmask for bitfield rpf_new_rpf_en */
#define HW_ATL2_RPF_NEW_EN_MSK 0x00000800
/* inverted bitmask for bitfield rpf_new_rpf_en */
#define HW_ATL2_RPF_NEW_EN_MSKN 0xfffff7ff
/* lower bit position of bitfield rpf_new_rpf_en */
#define HW_ATL2_RPF_NEW_EN_SHIFT 11
/* width of bitfield rpf_new_rpf_en */
#define HW_ATL2_RPF_NEW_EN_WIDTH 1
/* default value of bitfield rpf_new_rpf_en */
#define HW_ATL2_RPF_NEW_EN_DEFAULT 0x0
/* rx l2_uc_req_tag0{f}[5:0] bitfield definitions
 * preprocessor definitions for the bitfield "l2_uc_req_tag0{f}[5:0]".
 * parameter: filter {f} | stride size 0x8 | range [0, 37]
 * port="pif_rpf_l2_uc_req_tag0[5:0]"
 */

/* register address for bitfield l2_uc_req_tag0{f}[5:0] */
#define HW_ATL2_RPFL2UC_TAG_ADR(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
#define HW_ATL2_RPFL2UC_TAG_MSK 0x0FC00000
/* inverted bitmask for bitfield l2_uc_req_tag0{f}[5:0] */
#define HW_ATL2_RPFL2UC_TAG_MSKN 0xF03FFFFF
/* lower bit position of bitfield l2_uc_req_tag0{f}[5:0] */
#define HW_ATL2_RPFL2UC_TAG_SHIFT 22
/* width of bitfield l2_uc_req_tag0{f}[5:0] */
#define HW_ATL2_RPFL2UC_TAG_WIDTH 6
/* default value of bitfield l2_uc_req_tag0{f}[5:0] */
#define HW_ATL2_RPFL2UC_TAG_DEFAULT 0x0
/* rpf_l2_bc_req_tag[5:0] bitfield definitions
 * preprocessor definitions for the bitfield "rpf_l2_bc_req_tag[5:0]".
 * port="pifrpf_l2_bc_req_tag_i[5:0]"
 */
/* register address for bitfield rpf_l2_bc_req_tag */
#define HW_ATL2_RPF_L2_BC_TAG_ADR 0x000050F0
/* bitmask for bitfield rpf_l2_bc_req_tag */
#define HW_ATL2_RPF_L2_BC_TAG_MSK 0x0000003F
/* inverted bitmask for bitfield rpf_l2_bc_req_tag */
#define HW_ATL2_RPF_L2_BC_TAG_MSKN 0xffffffc0
/* lower bit position of bitfield rpf_l2_bc_req_tag */
#define HW_ATL2_RPF_L2_BC_TAG_SHIFT 0
/* width of bitfield rpf_l2_bc_req_tag */
#define HW_ATL2_RPF_L2_BC_TAG_WIDTH 6
/* default value of bitfield rpf_l2_bc_req_tag */
#define HW_ATL2_RPF_L2_BC_TAG_DEFAULT 0x0
/* rx rpf_rss_red1_data_[4:0] bitfield definitions
 * preprocessor definitions for the bitfield "rpf_rss_red1_data[4:0]".
 * Four 5-bit redirection entries are packed per 32-bit register;
 * TCs 0-3 use the bank at 0x6200, TCs 4-7 the bank at 0x6300.
 * port="pif_rpf_rss_red1_data_i[4:0]"
 */
/* register address for bitfield rpf_rss_red1_data[4:0] */
#define HW_ATL2_RPF_RSS_REDIR_ADR(TC, INDEX) (0x00006200 + \
	(0x100 * !!((TC) > 3)) + (INDEX) * 4)
/* bitmask for bitfield rpf_rss_red1_data[4:0] */
#define HW_ATL2_RPF_RSS_REDIR_MSK(TC) (0x00000001F << (5 * ((TC) % 4)))
/* lower bit position of bitfield rpf_rss_red1_data[4:0] */
#define HW_ATL2_RPF_RSS_REDIR_SHIFT(TC) (5 * ((TC) % 4))
/* width of bitfield rpf_rss_red1_data[4:0] */
#define HW_ATL2_RPF_RSS_REDIR_WIDTH 5
/* default value of bitfield rpf_rss_red1_data[4:0] */
#define HW_ATL2_RPF_RSS_REDIR_DEFAULT 0x0
/* rx vlan_req_tag0{f}[3:0] bitfield definitions
 * preprocessor definitions for the bitfield "vlan_req_tag0{f}[3:0]".
 * parameter: filter {f} | stride size 0x4 | range [0, 15]
 * port="pif_rpf_vlan_req_tag0[3:0]"
 */
/* register address for bitfield vlan_req_tag0{f}[3:0] */
#define HW_ATL2_RPF_VL_TAG_ADR(filter) (0x00005290 + (filter) * 0x4)
/* bitmask for bitfield vlan_req_tag0{f}[3:0] */
#define HW_ATL2_RPF_VL_TAG_MSK 0x0000F000
/* inverted bitmask for bitfield vlan_req_tag0{f}[3:0] */
#define HW_ATL2_RPF_VL_TAG_MSKN 0xFFFF0FFF
/* lower bit position of bitfield vlan_req_tag0{f}[3:0] */
#define HW_ATL2_RPF_VL_TAG_SHIFT 12
/* width of bitfield vlan_req_tag0{f}[3:0] */
#define HW_ATL2_RPF_VL_TAG_WIDTH 4
/* default value of bitfield vlan_req_tag0{f}[3:0] */
#define HW_ATL2_RPF_VL_TAG_DEFAULT 0x0
/* RX rx_q{Q}_tc_map[2:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "rx_q{Q}_tc_map[2:0]".
 * Eight 4-bit slots per 32-bit register (the map value itself is
 * 3 bits wide); queues above 31 fall back to address/shift 0.
 * Parameter: Queue {Q} | bit-level stride | range [0, 31]
 * PORT="pif_rx_q0_tc_map_i[2:0]"
 */
/* Register address for bitfield rx_q{Q}_tc_map[2:0] */
#define HW_ATL2_RX_Q_TC_MAP_ADR(queue) \
	(((queue) < 32) ? 0x00005900 + ((queue) / 8) * 4 : 0)
/* Lower bit position of bitfield rx_q{Q}_tc_map[2:0] */
#define HW_ATL2_RX_Q_TC_MAP_SHIFT(queue) \
	(((queue) < 32) ? ((queue) * 4) % 32 : 0)
/* Width of bitfield rx_q{Q}_tc_map[2:0] */
#define HW_ATL2_RX_Q_TC_MAP_WIDTH 3
/* Default value of bitfield rx_q{Q}_tc_map[2:0] */
#define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0
/* tx tx_buffer_clk_gate_en bitfield definitions
 * preprocessor definitions for the bitfield "tx_buffer_clk_gate_en".
 * port="pif_tpb_tx_buffer_clk_gate_en_i"
 */
/* register address for bitfield tx_buffer_clk_gate_en */
#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR 0x00007900
/* bitmask for bitfield tx_buffer_clk_gate_en */
#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK 0x00000020
/* inverted bitmask for bitfield tx_buffer_clk_gate_en */
#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSKN 0xffffffdf
/* lower bit position of bitfield tx_buffer_clk_gate_en */
#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT 5
/* width of bitfield tx_buffer_clk_gate_en */
#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_WIDTH 1
/* default value of bitfield tx_buffer_clk_gate_en */
#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0
/* tx data_tc{t}_credit_max[b:0] bitfield definitions
 * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
 * NOTE: shares register 0x7110 + tc*4 with data_tc{t}_weight below
 * (credit_max in bits [27:16], weight in bits [8:0]).
 * parameter: tc {t} | stride size 0x4 | range [0, 7]
 * port="pif_tps_data_tc0_credit_max_i[11:0]"
 */
/* register address for bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
/* tx data_tc{t}_weight[8:0] bitfield definitions
 * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
 * parameter: tc {t} | stride size 0x4 | range [0, 7]
 * port="pif_tps_data_tc0_weight_i[8:0]"
 */
/* register address for bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0
/* width of bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 9
/* default value of bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
/* tx interrupt moderation control register definitions
 * Preprocessor definitions for TX Interrupt Moderation Control Register
 * Base Address: 0x00007c28
 * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
 */
#define HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue) (0x00007c28u + (queue) * 0x40)
/* Launch time control register */
#define HW_ATL2_LT_CTRL_ADR 0x00007a1c
#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_MSK 0xFFFF0000
#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_SHIFT 16
#define HW_ATL2_LT_CTRL_CLK_RATIO_MSK 0x0000FF00
#define HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT 8
#define HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED 4
#define HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED 2
#define HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED 1
#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_MSK 0x00000008
#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_SHIFT 3
#define HW_ATL2_LT_CTRL_LINK_SPEED_MSK 0x00000007
#define HW_ATL2_LT_CTRL_LINK_SPEED_SHIFT 0
/* FPGA VER register: packs major.minor.build.revision into one dword */
#define HW_ATL2_FPGA_VER_ADR 0x000000f4
#define HW_ATL2_FPGA_VER_U32(mj, mi, bl, rv) \
	((((mj) & 0xff) << 24) | \
	 (((mi) & 0xff) << 16) | \
	 (((bl) & 0xff) << 8) | \
	 (((rv) & 0xff) << 0))
/* ahb_mem_addr{f}[31:0] Bitfield Definitions
 * Action-resolver table entries: each 0x10-byte record holds a request
 * tag, a tag mask and an action word.
 * Preprocessor definitions for the bitfield "ahb_mem_addr{f}[31:0]".
 * Parameter: filter {f} | stride size 0x10 | range [0, 127]
 * PORT="ahb_mem_addr{f}[31:0]"
 */
/* Register address for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(filter) \
	(0x00014000u + (filter) * 0x10)
/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSKN 0x00000000u
/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_SHIFT 0
/* Width of bitfield ahb_mem_addr{f}[31:0]
 * NOTE(review): the field is described as [31:0] (32 bits) and the mask
 * is full-width, yet WIDTH is 31 -- looks off-by-one, confirm.
 */
#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_WIDTH 31
/* Default value of bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_DEFAULT 0x0
/* Register address for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(filter) \
	(0x00014004u + (filter) * 0x10)
/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSKN 0x00000000u
/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_SHIFT 0
/* Width of bitfield ahb_mem_addr{f}[31:0]
 * NOTE(review): same apparent off-by-one as REQ_TAG_WIDTH above.
 */
#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_WIDTH 31
/* Default value of bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_DEFAULT 0x0
/* Register address for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(filter) \
	(0x00014008u + (filter) * 0x10)
/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSK 0x000007FFu
/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSKN 0xFFFFF800u
/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_ACTN_SHIFT 0
/* Width of bitfield ahb_mem_addr{f}[31:0]
 * NOTE(review): mask 0x7FF covers 11 bits but WIDTH is 10 -- confirm.
 */
#define HW_ATL2_RPF_ACT_RSLVR_ACTN_WIDTH 10
/* Default value of bitfield ahb_mem_addr{f}[31:0] */
#define HW_ATL2_RPF_ACT_RSLVR_ACTN_DEFAULT 0x0
/* rpf_rec_tab_en[15:0] Bitfield Definitions
 * Per-section enable bits for the filter recognition table.
 * Preprocessor definitions for the bitfield "rpf_rec_tab_en[15:0]".
 * PORT="pif_rpf_rec_tab_en[15:0]"
 */
/* Register address for bitfield rpf_rec_tab_en[15:0] */
#define HW_ATL2_RPF_REC_TAB_EN_ADR 0x00006ff0u
/* Bitmask for bitfield rpf_rec_tab_en[15:0] */
#define HW_ATL2_RPF_REC_TAB_EN_MSK 0x0000FFFFu
/* Inverted bitmask for bitfield rpf_rec_tab_en[15:0] */
#define HW_ATL2_RPF_REC_TAB_EN_MSKN 0xFFFF0000u
/* Lower bit position of bitfield rpf_rec_tab_en[15:0] */
#define HW_ATL2_RPF_REC_TAB_EN_SHIFT 0
/* Width of bitfield rpf_rec_tab_en[15:0] */
#define HW_ATL2_RPF_REC_TAB_EN_WIDTH 16
/* Default value of bitfield rpf_rec_tab_en[15:0] */
#define HW_ATL2_RPF_REC_TAB_EN_DEFAULT 0x0
/* Register address for firmware shared input buffer (host -> FW) */
#define HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(dword) (0x00012000U + (dword) * 0x4U)
/* Register address for firmware shared output buffer (FW -> host) */
#define HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(dword) (0x00013000U + (dword) * 0x4U)
/* pif_host_finished_buf_wr_i Bitfield Definitions
 * Host sets this flag after filling the shared input buffer.
 * Preprocessor definitions for the bitfield "pif_host_finished_buf_wr_i".
 * PORT="pif_host_finished_buf_wr_i"
 */
/* Register address for bitfield rpif_host_finished_buf_wr_i */
#define HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR 0x00000e00u
/* Bitmask for bitfield pif_host_finished_buf_wr_i */
#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK 0x00000001u
/* Inverted bitmask for bitfield pif_host_finished_buf_wr_i */
#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSKN 0xFFFFFFFEu
/* Lower bit position of bitfield pif_host_finished_buf_wr_i */
#define HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT 0
/* Width of bitfield pif_host_finished_buf_wr_i */
#define HW_ATL2_MIF_HOST_FINISHED_WRITE_WIDTH 1
/* Default value of bitfield pif_host_finished_buf_wr_i */
#define HW_ATL2_MIF_HOST_FINISHED_WRITE_DEFAULT 0x0
/* pif_mcp_finished_buf_rd_i Bitfield Definitions
 * FW clears this flag once it has consumed the shared input buffer.
 * Preprocessor definitions for the bitfield "pif_mcp_finished_buf_rd_i".
 * PORT="pif_mcp_finished_buf_rd_i"
 */
/* Register address for bitfield pif_mcp_finished_buf_rd_i */
#define HW_ATL2_MIF_MCP_FINISHED_READ_ADR 0x00000e04u
/* Bitmask for bitfield pif_mcp_finished_buf_rd_i */
#define HW_ATL2_MIF_MCP_FINISHED_READ_MSK 0x00000001u
/* Inverted bitmask for bitfield pif_mcp_finished_buf_rd_i */
#define HW_ATL2_MIF_MCP_FINISHED_READ_MSKN 0xFFFFFFFEu
/* Lower bit position of bitfield pif_mcp_finished_buf_rd_i */
#define HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT 0
/* Width of bitfield pif_mcp_finished_buf_rd_i */
#define HW_ATL2_MIF_MCP_FINISHED_READ_WIDTH 1
/* Default value of bitfield pif_mcp_finished_buf_rd_i */
#define HW_ATL2_MIF_MCP_FINISHED_READ_DEFAULT 0x0
/* Register address for bitfield pif_mcp_boot_reg */
#define HW_ATL2_MIF_BOOT_REG_ADR 0x00003040u
/* Host boot request interrupt: READY bit plus set/clear registers */
#define HW_ATL2_MCP_HOST_REQ_INT_READY BIT(0)
#define HW_ATL2_MCP_HOST_REQ_INT_ADR 0x00000F00u
#define HW_ATL2_MCP_HOST_REQ_INT_SET_ADR 0x00000F04u
#define HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR 0x00000F08u
#endif /* HW_ATL2_LLH_INTERNAL_H */
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#include <linux/iopoll.h>
#include "aq_hw_utils.h"
#include "hw_atl/hw_atl_utils.h"
#include "hw_atl2_utils.h"
#include "hw_atl2_llh.h"
#include "hw_atl2_llh_internal.h"
/* Expected A2 firmware interface major version (1.x). */
#define HW_ATL2_FW_VER_1X 0x01000000U

/* Status bits read back from the MCP boot register. */
#define AQ_A2_BOOT_STARTED         BIT(0x18)
#define AQ_A2_CRASH_INIT           BIT(0x1B)
#define AQ_A2_BOOT_CODE_FAILED     BIT(0x1C)
#define AQ_A2_FW_INIT_FAILED       BIT(0x1D)
#define AQ_A2_FW_INIT_COMP_SUCCESS BIT(0x1F)

/* Any of these means the boot terminated with an error. */
#define AQ_A2_FW_BOOT_FAILED_MASK (AQ_A2_CRASH_INIT | \
				   AQ_A2_BOOT_CODE_FAILED | \
				   AQ_A2_FW_INIT_FAILED)
/* Boot is over: either success or one of the failure bits above. */
#define AQ_A2_FW_BOOT_COMPLETE_MASK (AQ_A2_FW_BOOT_FAILED_MASK | \
				     AQ_A2_FW_INIT_COMP_SUCCESS)

/* Request bits written to the MCP boot register. */
#define AQ_A2_FW_BOOT_REQ_REBOOT        BIT(0x0)
#define AQ_A2_FW_BOOT_REQ_HOST_BOOT     BIT(0x8)
#define AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT BIT(0xA)
#define AQ_A2_FW_BOOT_REQ_PHY_FAST_BOOT BIT(0xB)
/**
 * hw_atl2_utils_initfw() - detect the A2 firmware and bind its ops table.
 * @self: HW context.
 * @fw_ops: out parameter receiving the selected FW ops table.
 *
 * Reads the running FW version and binds &aq_a2_fw_ops.  An unexpected
 * version is only warned about; the same ops are used regardless (the
 * original code assigned identical ops in both branches of the version
 * check, so the duplication is folded into a single assignment here).
 *
 * Return: result of the FW ops init() call.
 */
int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
	int err;

	self->fw_ver_actual = hw_atl2_utils_get_fw_version(self);

	if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X,
				   self->fw_ver_actual) != 0)
		aq_pr_err("Bad FW version detected: %x, but continue\n",
			  self->fw_ver_actual);
	*fw_ops = &aq_a2_fw_ops;

	aq_pr_trace("Detect ATL2FW %x\n", self->fw_ver_actual);
	self->aq_fw_ops = *fw_ops;
	err = self->aq_fw_ops->init(self);

	/* All A2 devices are Antigua-class silicon. */
	self->chip_features |= ATL_HW_CHIP_ANTIGUA;

	return err;
}
/* Return true once the RBL/MCP boot reached a terminal state: the boot
 * register reports completion (success or failure), or the firmware has
 * raised the "host boot requested" interrupt.
 */
static bool hw_atl2_mcp_boot_complete(struct aq_hw_s *self)
{
	u32 boot_status;

	boot_status = hw_atl2_mif_mcp_boot_reg_get(self);
	if (boot_status & AQ_A2_FW_BOOT_COMPLETE_MASK)
		return true;

	/* Firmware is asking the host to supply a boot image. */
	return !!(hw_atl2_mif_host_req_int_get(self) &
		  HW_ATL2_MCP_HOST_REQ_INT_READY);
}
/**
 * hw_atl2_utils_soft_reset() - reset the MAC by rebooting its firmware.
 * @self: HW context.
 *
 * Waits for the current boot code to start (rebooting anyway on
 * timeout), issues an RBL reboot request, then polls until the firmware
 * reports boot completion.  If FW ops are already bound, re-runs their
 * init() so the shared-buffer handshake is redone after the reset.
 *
 * Return: 0 on success, negative errno on timeout or boot failure.
 */
int hw_atl2_utils_soft_reset(struct aq_hw_s *self)
{
	bool rbl_complete = false;
	u32 rbl_status = 0;
	u32 rbl_request;
	int err;

	/* Up to 0.5s for boot code to start; 0xFFFFFFFF means the
	 * register read itself is returning garbage (bus not ready).
	 */
	err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_boot_reg_get, self,
					rbl_status,
					((rbl_status & AQ_A2_BOOT_STARTED) &&
					 (rbl_status != 0xFFFFFFFFu)),
					10, 500000);
	if (err)
		aq_pr_trace("Boot code probably hanged, reboot anyway");

	/* Clear any stale "host boot requested" interrupt before reboot. */
	hw_atl2_mif_host_req_int_clr(self, 0x01);
	rbl_request = AQ_A2_FW_BOOT_REQ_REBOOT;
#ifdef AQ_CFG_FAST_START
	rbl_request |= AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT;
#endif
	hw_atl2_mif_mcp_boot_reg_set(self, rbl_request);

	/* Wait for RBL boot */
	err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_boot_reg_get, self,
					rbl_status,
					((rbl_status & AQ_A2_BOOT_STARTED) &&
					 (rbl_status != 0xFFFFFFFFu)),
					10, 200000);
	if (err) {
		aq_pr_err("Boot code hanged");
		goto err_exit;
	}

	/* Up to 2s for the firmware itself to finish (or fail) booting. */
	err = readx_poll_timeout_atomic(hw_atl2_mcp_boot_complete, self,
					rbl_complete,
					rbl_complete,
					10, 2000000);
	if (err) {
		aq_pr_err("FW Restart timed out");
		goto err_exit;
	}

	rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
	if (rbl_status & AQ_A2_FW_BOOT_FAILED_MASK) {
		err = -EIO;
		aq_pr_err("FW Restart failed");
		goto err_exit;
	}

	/* FW asked the host for a boot image: no image in flash and
	 * host-side dynamic FW load is not implemented.
	 */
	if (hw_atl2_mif_host_req_int_get(self) &
	    HW_ATL2_MCP_HOST_REQ_INT_READY) {
		err = -EIO;
		aq_pr_err("No FW detected. Dynamic FW load not implemented");
		goto err_exit;
	}

	if (self->aq_fw_ops) {
		err = self->aq_fw_ops->init(self);
		if (err) {
			aq_pr_err("FW Init failed");
			goto err_exit;
		}
	}

err_exit:
	return err;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef HW_ATL2_UTILS_H
#define HW_ATL2_UTILS_H

#include "aq_hw.h"

/* F W   A P I
 *
 * The structures below mirror the A2 firmware shared-buffer layout
 * (see struct fw_interface_in / struct fw_interface_out).  Field order,
 * bit widths and reserved members are part of the FW ABI -- do not
 * reorder or repack them.
 */

/* Host-requested link configuration: rates, EEE, pause, loopback. */
struct link_options_s {
	u8 link_up:1;
	u8 link_renegotiate:1;
	u8 minimal_link_speed:1;
	u8 internal_loopback:1;
	u8 external_loopback:1;
	u8 rate_10M_hd:1;
	u8 rate_100M_hd:1;
	u8 rate_1G_hd:1;
	u8 rate_10M:1;
	u8 rate_100M:1;
	u8 rate_1G:1;
	u8 rate_2P5G:1;
	u8 rate_N2P5G:1;
	u8 rate_5G:1;
	u8 rate_N5G:1;
	u8 rate_10G:1;
	u8 eee_100M:1;
	u8 eee_1G:1;
	u8 eee_2P5G:1;
	u8 eee_5G:1;
	u8 eee_10G:1;
	u8 rsvd3:3;
	u8 pause_rx:1;
	u8 pause_tx:1;
	u8 rsvd4:1;
	u8 downshift:1;
	u8 downshift_retry:4;
};

/* Host mode (AQ_HOST_MODE_*) and MAC datapath control flags. */
struct link_control_s {
	u8 mode:4;
	u8 disable_crc_corruption:1;
	u8 discard_short_frames:1;
	u8 flow_control_mode:1;
	u8 disable_length_check:1;
	u8 discard_errored_frames:1;
	u8 control_frame_enable:1;
	u8 enable_tx_padding:1;
	u8 enable_crc_forwarding:1;
	u8 enable_frame_padding_removal_rx: 1;
	u8 promiscuous_mode: 1;
	u8 rsvd:2;
	u16 rsvd2;
};

/* Thermal shutdown thresholds (temperatures in FW units). */
struct thermal_shutdown_s {
	u8 enable:1;
	u8 warning_enable:1;
	u8 rsvd:6;
	u8 shutdown_temperature;
	u8 cold_temperature;
	u8 warning_temperature;
};

struct mac_address_s {
	u8 mac_address[6];
};

/* MAC address padded to a 4-byte multiple for the shared buffer. */
struct mac_address_aligned_s {
	struct mac_address_s aligned;
	u16 rsvd;
};
/* Sleep-proxy configuration: wake sources plus the offloads the FW
 * services on behalf of the sleeping host (ARP/NS/echo responders,
 * TCP keep-alive, mDNS).
 */
struct sleep_proxy_s {
	struct wake_on_lan_s {
		u8 wake_on_magic_packet:1;
		u8 wake_on_pattern:1;
		u8 wake_on_link_up:1;
		u8 wake_on_link_down:1;
		u8 wake_on_ping:1;
		u8 wake_on_timer:1;
		u8 rsvd:2;
		u8 rsvd2;
		u16 rsvd3;
		u32 link_up_timeout;
		u32 link_down_timeout;
		u32 timer;
	} wake_on_lan;

	/* 128-bit match mask plus CRC32 of the expected pattern. */
	struct {
		u32 mask[4];
		u32 crc32;
	} wake_up_pattern[8];

	/* packed: layout is FW ABI, no compiler padding allowed */
	struct __attribute__ ((__packed__)) {
		u8 arp_responder:1;
		u8 echo_responder:1;
		u8 igmp_client:1;
		u8 echo_truncate:1;
		u8 address_guard:1;
		u8 ignore_fragmented:1;
		u8 rsvd:2;
		u16 echo_max_len;
		u8 rsvd2;
	} ipv4_offload;

	u32 ipv4_offload_addr[8];
	u32 reserved[8];

	/* packed: layout is FW ABI, no compiler padding allowed */
	struct __attribute__ ((__packed__)) {
		u8 ns_responder:1;
		u8 echo_responder:1;
		u8 mld_client:1;
		u8 echo_truncate:1;
		u8 address_guard:1;
		u8 rsvd:3;
		u16 echo_max_len;
		u8 rsvd2;
	} ipv6_offload;

	u32 ipv6_offload_addr[16][4];

	struct {
		u16 port[16];
	} tcp_port_offload;

	struct {
		u16 port[16];
	} udp_port_offload;

	struct {
		u32 retry_count;
		u32 retry_interval;
	} ka4_offload;

	/* Per-connection TCP keep-alive state over IPv4. */
	struct {
		u32 timeout;
		u16 local_port;
		u16 remote_port;
		u8 remote_mac_addr[6];
		u16 rsvd;
		u32 rsvd2;
		u32 rsvd3;
		u16 rsvd4;
		u16 win_size;
		u32 seq_num;
		u32 ack_num;
		u32 local_ip;
		u32 remote_ip;
	} ka4_connection[16];

	struct {
		u32 retry_count;
		u32 retry_interval;
	} ka6_offload;

	/* Per-connection TCP keep-alive state over IPv6. */
	struct {
		u32 timeout;
		u16 local_port;
		u16 remote_port;
		u8 remote_mac_addr[6];
		u16 rsvd;
		u32 rsvd2;
		u32 rsvd3;
		u16 rsvd4;
		u16 win_size;
		u32 seq_num;
		u32 ack_num;
		u32 local_ip[4];
		u32 remote_ip[4];
	} ka6_connection[16];

	struct {
		u32 rr_count;
		u32 rr_buf_len;
		u32 idx_offset;
		u32 rr__offset;
	} mdns_offload;
};
/* Per-link-rate pause quanta and refresh thresholds. */
struct pause_quanta_s {
	u16 quanta_10M;
	u16 threshold_10M;
	u16 quanta_100M;
	u16 threshold_100M;
	u16 quanta_1G;
	u16 threshold_1G;
	u16 quanta_2P5G;
	u16 threshold_2P5G;
	u16 quanta_5G;
	u16 threshold_5G;
	u16 quanta_10G;
	u16 threshold_10G;
};

/* Location of a data region inside the shared buffer. */
struct data_buffer_status_s {
	u32 data_offset;
	u32 data_length;
};

/* Device-level capability flags reported by the FW. */
struct device_caps_s {
	u8 finite_flashless:1;
	u8 cable_diag:1;
	u8 ncsi:1;
	u8 avb:1;
	u8 rsvd:4;
	u8 rsvd2;
	u16 rsvd3;
	u32 rsvd4;
};

/* Bundle/MAC/PHY firmware version triplets. */
struct version_s {
	struct bundle_version_t {
		u8 major;
		u8 minor;
		u16 build;
	} bundle;
	struct mac_version_t {
		u8 major;
		u8 minor;
		u16 build;
	} mac;
	struct phy_version_t {
		u8 major;
		u8 minor;
		u16 build;
	} phy;
	u32 rsvd;
};

/* Current link state; link_rate holds an AQ_A2_FW_LINK_RATE_* code. */
struct link_status_s {
	u8 link_state:4;
	u8 link_rate:4;
	u8 pause_tx:1;
	u8 pause_rx:1;
	u8 eee:1;
	u8 duplex:1;
	u8 rsvd:4;
	u16 rsvd2;
};

/* Wake event details, including a copy of the wake-up packet. */
struct wol_status_s {
	u8 wake_count;
	u8 wake_reason;
	u16 wake_up_packet_length :12;
	u16 wake_up_pattern_number :3;
	u16 rsvd:1;
	u32 wake_up_packet[379];
};

/* MAC firmware health/heartbeat as reported in the output buffer. */
struct mac_health_monitor_s {
	u8 mac_ready:1;
	u8 mac_fault:1;
	u8 mac_flashless_finished:1;
	u8 rsvd:5;
	u8 mac_temperature;
	u16 mac_heart_beat;
	u16 mac_fault_code;
	u16 rsvd2;
};

/* PHY firmware health/heartbeat as reported in the output buffer. */
struct phy_health_monitor_s {
	u8 phy_ready:1;
	u8 phy_fault:1;
	u8 phy_hot_warning:1;
	u8 rsvd:5;
	u8 phy_temperature;
	u16 phy_heart_beat;
	u16 phy_fault_code;
	u16 rsvd2;
};
/* Link capabilities of the local device, as reported by the FW. */
struct device_link_caps_s {
	u8 rsvd:3;
	u8 internal_loopback:1;
	u8 external_loopback:1;
	u8 rate_10M_hd:1;
	u8 rate_100M_hd:1;
	u8 rate_1G_hd:1;
	u8 rate_10M:1;
	u8 rate_100M:1;
	u8 rate_1G:1;
	u8 rate_2P5G:1;
	u8 rate_N2P5G:1;
	u8 rate_5G:1;
	u8 rate_N5G:1;
	u8 rate_10G:1;
	u8 rsvd3:1;
	u8 eee_100M:1;
	u8 eee_1G:1;
	u8 eee_2P5G:1;
	u8 rsvd4:1;
	u8 eee_5G:1;
	u8 rsvd5:1;
	u8 eee_10G:1;
	u8 pause_rx:1;
	u8 pause_tx:1;
	u8 pfc:1;
	u8 downshift:1;
	u8 downshift_retry:4;
};

/* Sleep-proxy feature set and table sizes supported by this FW build. */
struct sleep_proxy_caps_s {
	u8 ipv4_offload:1;
	u8 ipv6_offload:1;
	u8 tcp_port_offload:1;
	u8 udp_port_offload:1;
	u8 ka4_offload:1;
	u8 ka6_offload:1;
	u8 mdns_offload:1;
	u8 wake_on_ping:1;
	u8 wake_on_magic_packet:1;
	u8 wake_on_pattern:1;
	u8 wake_on_timer:1;
	u8 wake_on_link:1;
	u8 wake_patterns_count:4;
	u8 ipv4_count;
	u8 ipv6_count;
	u8 tcp_port_offload_count;
	u8 udp_port_offload_count;
	u8 tcp4_ka_count;
	u8 tcp6_ka_count;
	u8 igmp_offload:1;
	u8 mld_offload:1;
	u8 rsvd:6;
	u8 rsvd2;
	u16 rsvd3;
};

/* Link capabilities advertised by the link partner. */
struct lkp_link_caps_s {
	u8 rsvd:5;
	u8 rate_10M_hd:1;
	u8 rate_100M_hd:1;
	u8 rate_1G_hd:1;
	u8 rate_10M:1;
	u8 rate_100M:1;
	u8 rate_1G:1;
	u8 rate_2P5G:1;
	u8 rate_N2P5G:1;
	u8 rate_5G:1;
	u8 rate_N5G:1;
	u8 rate_10G:1;
	u8 rsvd2:1;
	u8 eee_100M:1;
	u8 eee_1G:1;
	u8 eee_2P5G:1;
	u8 rsvd3:1;
	u8 eee_5G:1;
	u8 rsvd4:1;
	u8 eee_10G:1;
	u8 pause_rx:1;
	u8 pause_tx:1;
	u8 rsvd5:6;
};

/* FW crash/core dump registers. */
struct core_dump_s {
	u32 reg0;
	u32 reg1;
	u32 reg2;
	u32 hi;
	u32 lo;
	u32 regs[32];
};

/* FW trace ring buffer. */
struct trace_s {
	u32 sync_counter;
	u32 mem_buffer[0x1ff];
};

/* Cable diagnostics request: toggle starts a new run. */
struct cable_diag_control_s {
	u8 toggle :1;
	u8 rsvd:7;
	u8 wait_timeout_sec;
	u16 rsvd2;
};

/* Per-lane cable diagnostics result. */
struct cable_diag_lane_data_s {
	u8 result_code;
	u8 dist;
	u8 far_dist;
	u8 rsvd;
};

struct cable_diag_status_s {
	struct cable_diag_lane_data_s lane_data[4];
	u8 transact_id;
	u8 status:4;
	u8 rsvd:4;
	u16 rsvd2;
};

/* FW-maintained counters (link transitions plus MSM packet/octet
 * counters); consumed by aq_a2_fw_update_stats().
 */
struct statistics_s {
	struct {
		u32 link_up;
		u32 link_down;
	} link;
	struct {
		u64 tx_unicast_octets;
		u64 tx_multicast_octets;
		u64 tx_broadcast_octets;
		u64 rx_unicast_octets;
		u64 rx_multicast_octets;
		u64 rx_broadcast_octets;
		u32 tx_unicast_frames;
		u32 tx_multicast_frames;
		u32 tx_broadcast_frames;
		u32 tx_errors;
		u32 rx_unicast_frames;
		u32 rx_multicast_frames;
		u32 rx_broadcast_frames;
		u32 rx_dropped_frames;
		u32 rx_error_frames;
		u32 tx_good_frames;
		u32 rx_good_frames;
		u32 reserve_fw_gap;
	} msm;
	u32 main_loop_cycles;
	u32 reserve_fw_gap;
};
/* Base indices and sizes of the RX filter tables owned by the driver
 * (the FW partitions the hardware tables between itself and the host).
 */
struct filter_caps_s {
	u8 l2_filters_base_index:6;
	u8 flexible_filter_mask:2;
	u8 l2_filter_count;
	u8 ethertype_filter_base_index;
	u8 ethertype_filter_count;
	u8 vlan_filter_base_index;
	u8 vlan_filter_count;
	u8 l3_ip4_filter_base_index:4;
	u8 l3_ip4_filter_count:4;
	u8 l3_ip6_filter_base_index:4;
	u8 l3_ip6_filter_count:4;
	u8 l4_filter_base_index:4;
	u8 l4_filter_count:4;
	u8 l4_flex_filter_base_index:4;
	u8 l4_flex_filter_count:4;
	u8 rslv_tbl_base_index;
	u8 rslv_tbl_count;
};

/* Steering policy for promiscuous / broadcast / multicast traffic. */
struct request_policy_s {
	struct {
		u8 all:1;
		u8 mcast:1;
		u8 rx_queue_tc_index:5;
		u8 queue_or_tc:1;
	} promisc;
	struct {
		u8 accept:1;
		u8 rsvd:1;
		u8 rx_queue_tc_index:5;
		u8 queue_or_tc:1;
	} bcast;
	struct {
		u8 accept:1;
		u8 rsvd:1;
		u8 rx_queue_tc_index:5;
		u8 queue_or_tc:1;
	} mcast;
	u8 rsvd:8;
};

/* Host -> FW shared input buffer layout.  Offsets of the members are
 * taken with offsetof() by the hw_atl2_shared_buffer_* macros, so the
 * order here must match the FW exactly.
 */
struct fw_interface_in {
	u32 mtu;
	u32 rsvd1;
	struct mac_address_aligned_s mac_address;
	struct link_control_s link_control;
	u32 rsvd2;
	struct link_options_s link_options;
	u32 rsvd3;
	struct thermal_shutdown_s thermal_shutdown;
	u32 rsvd4;
	struct sleep_proxy_s sleep_proxy;
	u32 rsvd5;
	struct pause_quanta_s pause_quanta[8];
	struct cable_diag_control_s cable_diag_control;
	u32 rsvd6;
	struct data_buffer_status_s data_buffer_status;
	u32 rsvd7;
	struct request_policy_s request_policy;
};

/* Double transaction counter used to detect torn reads of the output
 * buffer (see hw_atl2_shared_buffer_read_block()).
 */
struct transaction_counter_s {
	u16 transaction_cnt_a;
	u16 transaction_cnt_b;
};

struct management_status_s {
	struct mac_address_s mac_address;
	u16 vlan;

	struct{
		u32 enable : 1;
		u32 rsvd:31;
	} flags;

	u32 rsvd1;
	u32 rsvd2;
	u32 rsvd3;
	u32 rsvd4;
	u32 rsvd5;
};

/* FW -> host shared output buffer layout; same ABI constraints as
 * struct fw_interface_in.
 */
struct fw_interface_out {
	struct transaction_counter_s transaction_id;
	struct version_s version;
	struct link_status_s link_status;
	struct wol_status_s wol_status;
	u32 rsvd;
	u32 rsvd2;
	struct mac_health_monitor_s mac_health_monitor;
	u32 rsvd3;
	u32 rsvd4;
	struct phy_health_monitor_s phy_health_monitor;
	u32 rsvd5;
	u32 rsvd6;
	struct cable_diag_status_s cable_diag_status;
	u32 rsvd7;
	struct device_link_caps_s device_link_caps;
	u32 rsvd8;
	struct sleep_proxy_caps_s sleep_proxy_caps;
	u32 rsvd9;
	struct lkp_link_caps_s lkp_link_caps;
	u32 rsvd10;
	struct core_dump_s core_dump;
	u32 rsvd11;
	struct statistics_s stats;
	u32 rsvd12;
	struct filter_caps_s filter_caps;
	struct device_caps_s device_caps;
	u32 rsvd13;
	struct management_status_s management_status;
	u32 reserve[21];
	struct trace_s trace;
};
/* Values of link_status_s::link_rate. */
#define AQ_A2_FW_LINK_RATE_INVALID 0
#define AQ_A2_FW_LINK_RATE_10M 1
#define AQ_A2_FW_LINK_RATE_100M 2
#define AQ_A2_FW_LINK_RATE_1G 3
#define AQ_A2_FW_LINK_RATE_2G5 4
#define AQ_A2_FW_LINK_RATE_5G 5
#define AQ_A2_FW_LINK_RATE_10G 6

/* Values of link_control_s::mode. */
#define AQ_HOST_MODE_INVALID 0U
#define AQ_HOST_MODE_ACTIVE 1U
#define AQ_HOST_MODE_SLEEP_PROXY 2U
#define AQ_HOST_MODE_LOW_POWER 3U
#define AQ_HOST_MODE_SHUTDOWN 4U

int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);

int hw_atl2_utils_soft_reset(struct aq_hw_s *self);

u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self);

int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
						u8 *base_index, u8 *count);

extern const struct aq_fw_ops aq_a2_fw_ops;

#endif /* HW_ATL2_UTILS_H */
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
* Copyright (C) 2020 Marvell International Ltd.
*/
#include <linux/iopoll.h>
#include "aq_hw.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl2_utils.h"
#include "hw_atl2_llh.h"
#include "hw_atl2_internal.h"
/* Max attempts at getting a consistent snapshot of the FW output buffer. */
#define AQ_A2_FW_READ_TRY_MAX 1000

/* Write/read a named member of the FW shared INPUT buffer; the member's
 * offsetof() in struct fw_interface_in selects the dword position.
 */
#define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \
	hw_atl2_mif_shared_buf_write(HW,\
		(offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
		(u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32))

#define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \
	hw_atl2_mif_shared_buf_get(HW, \
		(offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
		(u32 *)&(VARIABLE), \
		sizeof(VARIABLE) / sizeof(u32))

/* This should never be used on non atomic fields,
 * treat any > u32 read as non atomic.
 */
#define hw_atl2_shared_buffer_read(HW, ITEM, VARIABLE) \
{\
	BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
			 sizeof(u32)) != 0,\
			 "Non aligned read " # ITEM);\
	BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\
			 "Non atomic read " # ITEM);\
	hw_atl2_mif_shared_buf_read(HW, \
		(offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
		(u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
}

/* Multi-dword read of an OUTPUT-buffer member, made tear-free via the
 * transaction counter (see hw_atl2_shared_buffer_read_block()).
 */
#define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \
	hw_atl2_shared_buffer_read_block((HW), \
		(offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
		sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\
		(DATA))
/* Read @dwords 32-bit words from the FW shared output buffer at @offset.
 *
 * The transaction counter is used like a seqlock: a snapshot is
 * accepted only if both counter halves agree before the copy and the
 * counter is unchanged after it (presumably the FW bumps one half
 * before updating the buffer and the other after -- confirm against FW
 * spec).
 *
 * Return: 0 on success, -ETIME if no stable snapshot was observed
 * within AQ_A2_FW_READ_TRY_MAX counter reads.
 */
static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
					    u32 offset, u32 dwords, void *data)
{
	struct transaction_counter_s tid1, tid2;
	int cnt = 0;

	do {
		do {
			hw_atl2_shared_buffer_read(self, transaction_id, tid1);
			cnt++;
			if (cnt > AQ_A2_FW_READ_TRY_MAX)
				return -ETIME;
			if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
				udelay(1);
		} while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);

		hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);

		hw_atl2_shared_buffer_read(self, transaction_id, tid2);
		cnt++;
		if (cnt > AQ_A2_FW_READ_TRY_MAX)
			return -ETIME;
	} while (tid2.transaction_cnt_a != tid2.transaction_cnt_b ||
		 tid1.transaction_cnt_a != tid2.transaction_cnt_a);

	return 0;
}
/* Tell the FW the host finished writing the shared input buffer, then
 * wait up to 100ms for the FW to ack by clearing the "mcp finished
 * read" flag.  Return 0 on ack, poll-timeout errno otherwise.
 */
static inline int hw_atl2_shared_buffer_finish_ack(struct aq_hw_s *self)
{
	u32 val;
	int err;

	hw_atl2_mif_host_finished_write_set(self, 1U);
	err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
					self, val, val == 0U,
					100, 100000U);
	WARN(err, "hw_atl2_shared_buffer_finish_ack");

	return err;
}
/* Initial FW handshake: request ACTIVE host mode and program the jumbo
 * MTU, then wait for the FW to consume the input buffer.
 *
 * The ack poll here uses a 5s budget instead of the 100ms of
 * hw_atl2_shared_buffer_finish_ack() -- presumably because the first
 * exchange right after boot can take longer (confirm before deduping).
 */
static int aq_a2_fw_init(struct aq_hw_s *self)
{
	struct link_control_s link_control;
	u32 mtu;
	u32 val;
	int err;

	hw_atl2_shared_buffer_get(self, link_control, link_control);
	link_control.mode = AQ_HOST_MODE_ACTIVE;
	hw_atl2_shared_buffer_write(self, link_control, link_control);

	/* The read value is discarded; the get may only matter for its
	 * side effects on the shared-buffer state -- TODO confirm.
	 */
	hw_atl2_shared_buffer_get(self, mtu, mtu);
	mtu = HW_ATL2_MTU_JUMBO;
	hw_atl2_shared_buffer_write(self, mtu, mtu);

	hw_atl2_mif_host_finished_write_set(self, 1U);
	err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
					self, val, val == 0U,
					100, 5000000U);
	WARN(err, "hw_atl2_shared_buffer_finish_ack");

	return err;
}
/* Driver teardown: request SHUTDOWN host mode and wait for the FW ack. */
static int aq_a2_fw_deinit(struct aq_hw_s *self)
{
	struct link_control_s link_control;

	hw_atl2_shared_buffer_get(self, link_control, link_control);
	link_control.mode = AQ_HOST_MODE_SHUTDOWN;
	hw_atl2_shared_buffer_write(self, link_control, link_control);

	return hw_atl2_shared_buffer_finish_ack(self);
}
/* Translate an AQ_NIC_RATE_* bitmask into the FW link_options rate bits. */
static void a2_link_speed_mask2fw(u32 speed,
				  struct link_options_s *link_options)
{
	link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M);
	link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M);
	link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
	link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2GS);
	/* the "N" 2.5G request simply mirrors the plain 2.5G one */
	link_options->rate_N2P5G = link_options->rate_2P5G;
	link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
	link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
	link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
}
/* Request link-up at any of the rates in @speed (AQ_NIC_RATE_* mask). */
static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed)
{
	struct link_options_s link_options;

	hw_atl2_shared_buffer_get(self, link_options, link_options);
	link_options.link_up = 1U;
	a2_link_speed_mask2fw(speed, &link_options);
	hw_atl2_shared_buffer_write(self, link_options, link_options);

	return hw_atl2_shared_buffer_finish_ack(self);
}
/* Map the generic MPI state onto the FW link_up flag: INIT brings the
 * link up, DEINIT takes it down, RESET/POWER leave it untouched.
 */
static int aq_a2_fw_set_state(struct aq_hw_s *self,
			      enum hal_atl_utils_fw_state_e state)
{
	struct link_options_s link_options;

	hw_atl2_shared_buffer_get(self, link_options, link_options);

	if (state == MPI_INIT)
		link_options.link_up = 1U;
	else if (state == MPI_DEINIT)
		link_options.link_up = 0U;
	/* MPI_RESET and MPI_POWER require no change here. */

	hw_atl2_shared_buffer_write(self, link_options, link_options);

	return hw_atl2_shared_buffer_finish_ack(self);
}
static int aq_a2_fw_update_link_status(struct aq_hw_s *self)
{
struct link_status_s link_status;
hw_atl2_shared_buffer_read(self, link_status, link_status);
switch (link_status.link_rate) {
case AQ_A2_FW_LINK_RATE_10G:
self->aq_link_status.mbps = 10000;
break;
case AQ_A2_FW_LINK_RATE_5G:
self->aq_link_status.mbps = 5000;
break;
case AQ_A2_FW_LINK_RATE_2G5:
self->aq_link_status.mbps = 2500;
break;
case AQ_A2_FW_LINK_RATE_1G:
self->aq_link_status.mbps = 1000;
break;
case AQ_A2_FW_LINK_RATE_100M:
self->aq_link_status.mbps = 100;
break;
case AQ_A2_FW_LINK_RATE_10M:
self->aq_link_status.mbps = 10;
break;
default:
self->aq_link_status.mbps = 0;
}
return 0;
}
/* Fetch the permanent MAC address from the FW shared buffer into @mac.
 *
 * If the FW-provided address is unusable -- multicast bit set, or the
 * first three octets all zero -- a fallback address is synthesized from
 * fixed constants plus 16 random bits.
 */
static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
{
	struct mac_address_aligned_s mac_address;

	hw_atl2_shared_buffer_get(self, mac_address, mac_address);
	ether_addr_copy(mac, (u8 *)mac_address.aligned.mac_address);

	if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
		unsigned int rnd = 0;
		u32 h;
		u32 l;

		get_random_bytes(&rnd, sizeof(unsigned int));

		/* l packs octets 2..5 (0xE3, 0x00, then 16 random bits);
		 * only the low two bytes of h are used for octets 0..1.
		 * NOTE(review): the (0x00 << 16) term is a no-op kept for
		 * readability of the byte layout.
		 */
		l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16);
		h = 0x8001300EU;

		/* Unpack low-to-high bytes into mac[5]..mac[0]. */
		mac[5] = (u8)(0xFFU & l);
		l >>= 8;
		mac[4] = (u8)(0xFFU & l);
		l >>= 8;
		mac[3] = (u8)(0xFFU & l);
		l >>= 8;
		mac[2] = (u8)(0xFFU & l);
		mac[1] = (u8)(0xFFU & h);
		h >>= 8;
		mac[0] = (u8)(0xFFU & h);
	}

	return 0;
}
/* Refresh self->curr_stats from the FW statistics block.
 *
 * MSM counters are accumulated as deltas against the previous snapshot
 * (priv->last_stats), but only while the link is up; DMA counters are
 * read directly from hardware registers.  The snapshot is updated
 * unconditionally at the end.
 */
static int aq_a2_fw_update_stats(struct aq_hw_s *self)
{
	struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
	struct statistics_s stats;

	/* Tear-free multi-dword read of the whole stats block. */
	hw_atl2_shared_buffer_read_safe(self, stats, &stats);

/* Accumulate delta of FW counter _F_ into driver counter _N_. */
#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
			stats.msm._F_ - priv->last_stats.msm._F_)

	if (self->aq_link_status.mbps) {
		AQ_SDELTA(uprc, rx_unicast_frames);
		AQ_SDELTA(mprc, rx_multicast_frames);
		AQ_SDELTA(bprc, rx_broadcast_frames);
		AQ_SDELTA(erpr, rx_error_frames);

		AQ_SDELTA(uptc, tx_unicast_frames);
		AQ_SDELTA(mptc, tx_multicast_frames);
		AQ_SDELTA(bptc, tx_broadcast_frames);
		AQ_SDELTA(erpt, tx_errors);

		AQ_SDELTA(ubrc, rx_unicast_octets);
		AQ_SDELTA(ubtc, tx_unicast_octets);
		AQ_SDELTA(mbrc, rx_multicast_octets);
		AQ_SDELTA(mbtc, tx_multicast_octets);
		AQ_SDELTA(bbrc, rx_broadcast_octets);
		AQ_SDELTA(bbtc, tx_broadcast_octets);
	}
#undef AQ_SDELTA

	self->curr_stats.dma_pkt_rc =
		hw_atl_stats_rx_dma_good_pkt_counter_get(self);
	self->curr_stats.dma_pkt_tc =
		hw_atl_stats_tx_dma_good_pkt_counter_get(self);
	self->curr_stats.dma_oct_rc =
		hw_atl_stats_rx_dma_good_octet_counter_get(self);
	self->curr_stats.dma_oct_tc =
		hw_atl_stats_tx_dma_good_octet_counter_get(self);
	self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);

	memcpy(&priv->last_stats, &stats, sizeof(stats));

	return 0;
}
/* Trigger a link renegotiation: pulse the link_renegotiate flag in the
 * shared input buffer (set, ack, then write it back as zero).
 */
static int aq_a2_fw_renegotiate(struct aq_hw_s *self)
{
	struct link_options_s link_options;
	int err;

	hw_atl2_shared_buffer_get(self, link_options, link_options);
	link_options.link_renegotiate = 1U;
	hw_atl2_shared_buffer_write(self, link_options, link_options);
	err = hw_atl2_shared_buffer_finish_ack(self);

	/* We should put renegotiate status back to zero
	 * after command completes
	 */
	link_options.link_renegotiate = 0U;
	hw_atl2_shared_buffer_write(self, link_options, link_options);

	return err;
}
u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
{
struct version_s version;
hw_atl2_shared_buffer_read_safe(self, version, &version);
/* A2 FW version is stored in reverse order */
return version.mac.major << 24 |
version.mac.minor << 16 |
version.mac.build;
}
/* Report the slice of the action-resolver table the FW has assigned to
 * the host (@base_index, @count entries).
 *
 * Return: 0 on success, negative errno if the caps read failed.
 */
int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
						u8 *base_index, u8 *count)
{
	struct filter_caps_s filter_caps;
	int err;

	err = hw_atl2_shared_buffer_read_safe(self, filter_caps, &filter_caps);
	if (err)
		return err;

	*base_index = filter_caps.rslv_tbl_base_index;
	*count = filter_caps.rslv_tbl_count;

	return 0;
}
/* FW operations table for A2 (Antigua) hardware; .reset is not
 * provided here (resets go through hw_atl2_utils_soft_reset()).
 */
const struct aq_fw_ops aq_a2_fw_ops = {
	.init               = aq_a2_fw_init,
	.deinit             = aq_a2_fw_deinit,
	.reset              = NULL,
	.renegotiate        = aq_a2_fw_renegotiate,
	.get_mac_permanent  = aq_a2_fw_get_mac_permanent,
	.set_link_speed     = aq_a2_fw_set_link_speed,
	.set_state          = aq_a2_fw_set_state,
	.update_link_status = aq_a2_fw_update_link_status,
	.update_stats       = aq_a2_fw_update_stats,
};
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment