Commit 7110fe47 authored by David S. Miller's avatar David S. Miller

Merge branch 'aquantia'

David VomLehn says:

====================
net: ethernet: aquantia: Add AQtion 2.5/5 GB NIC driver

This series introduces the AQtion NIC driver for the aQuantia
AQC107/AQC108 network devices.

v1: Initial version
v2: o Make necessary drivers/net/ethernet changes to integrate software
    o Drop intermediate atlantic directory
    o Remove Makefile things only appropriate to out of tree module
      building
v3: o Move changes to drivers/net/ethernet/{Kconfig,Makefile} to the last
      patch to ensure clean bisection.
    o Removed inline attribute aq_hw_write_req() as it was defined in
      only one .c file.
    o #included pci.h in aq_common.h to get struct pci definition.
    o Modified code to unlock based execution flow rather than using a
      flag.
    o Made a number of functions that were only used in a single file
      static.
    o Cleaned up error and return code handling in various places.
    o Remove AQ_CFG_IP_ALIGN definition.
    o Other minor code clean up.
v4: o Using do_div for 64 bit division.
    o Modified NIC statistics code.
    o Using build_skb instead of netdev_alloc_skb for single fragment packets.
    o Removed extra aq_nic.o from Makefile
v5: o Removed extra newline at the end of the files.
v6: o Removed unnecessary cast from void*.
    o Reworked strings array for ethtool statistics.
    o Added stringset == ETH_SS_STATS checking.
    o AQ_OBJ_HEADER replaced to aq_obj_header_s struct.
    o AQ_OBJ_SET/TST/CLR macroses replaced to inline functions.
    o Driver sources placed in to atlantic directory.
    o Fixed compilation warnings (Make W=1)
    o Added firmware version checking.
    o Code cleaning.
v7  o Removed unnecessary cast from memory allocation function (aq_ring.c).
v8  o Switched to using kcalloc instead of kzalloc.
    o Now provide bus_info for ethtool
    o Used div() to avoid __bad_udelay build error.
Signed-off-by: default avatarAlexander Loktionov <Alexander.Loktionov@aquantia.com>
Signed-off-by: default avatarDmitrii Tarakanov <Dmitrii.Tarakanov@aquantia.com>
Signed-off-by: default avatarPavel Belous <Pavel.Belous@aquantia.com>
Signed-off-by: default avatarDavid M. VomLehn <vomlehn@texas.net>
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 82272db8 aa13f7ce
......@@ -29,6 +29,7 @@ source "drivers/net/ethernet/amazon/Kconfig"
source "drivers/net/ethernet/amd/Kconfig"
source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/aurora/Kconfig"
......
......@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
......
#
# aQuantia device configuration
#
config NET_VENDOR_AQUANTIA
bool "aQuantia devices"
default y
---help---
Set this to y if you have an Ethernet network card that uses the aQuantia
AQC107/AQC108 chipset.
This option does not build any drivers; it causes the aQuantia
drivers that can be built to appear in the list of Ethernet drivers.
if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI && X86_64
---help---
This enables the support for the aQuantia AQtion(tm) Ethernet card.
endif # NET_VENDOR_AQUANTIA
#
# Makefile for the aQuantia device drivers.
#
obj-$(CONFIG_AQTION) += atlantic/
################################################################################
#
# aQuantia Ethernet Controller AQtion Linux Driver
# Copyright(c) 2014-2017 aQuantia Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information: <rdc-drv@aquantia.com>
# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
#
################################################################################
#
# Makefile for the AQtion(tm) Ethernet driver
#
obj-$(CONFIG_AQTION) += atlantic.o
atlantic-objs := aq_main.o \
aq_nic.o \
aq_pci_func.o \
aq_vec.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
hw_atl/hw_atl_llh.o
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_cfg.h: Definition of configuration parameters and constants. */
#ifndef AQ_CFG_H
#define AQ_CFG_H
/* Default number of interrupt vectors (rx/tx queue pairs) */
#define AQ_CFG_VECS_DEF 4U
/* Default number of traffic classes */
#define AQ_CFG_TCS_DEF 1U
/* Default tx/rx descriptor ring sizes */
#define AQ_CFG_TXDS_DEF 4096U
#define AQ_CFG_RXDS_DEF 1024U
/* 0 = interrupt-driven operation, 1 = timer-based polling */
#define AQ_CFG_IS_POLLING_DEF 0U
/* Force legacy (INTx) interrupts instead of MSI/MSI-X */
#define AQ_CFG_FORCE_LEGACY_INT 0U
/* Interrupt moderation default on, with its default rate */
#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U
#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
#define AQ_CFG_IRQ_MASK 0x1FFU
/* Hard upper bounds on vectors and traffic classes */
#define AQ_CFG_VECS_MAX 8U
#define AQ_CFG_TCS_MAX 8U
/* Maximum frame sizes handled on tx and rx paths */
#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
/* RSS */
#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U
#define AQ_CFG_RSS_HASHKEY_SIZE 320U
#define AQ_CFG_IS_RSS_DEF 1U
#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF
#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U
/* MSI-X vectors and ports per PCI function */
#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
#define AQ_CFG_PCI_FUNC_PORTS 2U
/* Service/polling timer periods, in jiffies */
#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ)
#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
#define AQ_CFG_SKB_FRAGS_MAX 32U
/* NAPI budget per poll */
#define AQ_CFG_NAPI_WEIGHT 64U
#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
/* Flow control mode: 3 presumably means rx+tx pause enabled -- TODO confirm
 * against the hw layer's interpretation.
 */
#define AQ_CFG_FC_MODE 3U
#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
#define AQ_CFG_IS_AUTONEG_DEF 1U
#define AQ_CFG_MTU_DEF 1514U
#define AQ_CFG_LOCK_TRYS 100U
/* Strings reported through MODULE_* macros and ethtool drvinfo */
#define AQ_CFG_DRV_AUTHOR "aQuantia"
#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
#define AQ_CFG_DRV_NAME "aquantia"
#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
__stringify(NIC_MINOR_DRIVER_VERSION)"."\
__stringify(NIC_BUILD_DRIVER_VERSION)"."\
__stringify(NIC_REVISION_DRIVER_VERSION)
#endif /* AQ_CFG_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_common.h: Basic includes for all files in project. */
#ifndef AQ_COMMON_H
#define AQ_COMMON_H
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "ver.h"
#include "aq_nic.h"
#include "aq_cfg.h"
#include "aq_utils.h"
#endif /* AQ_COMMON_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_ethtool.c: Definition of ethertool related functions. */
#include "aq_ethtool.h"
#include "aq_nic.h"
/* ethtool -d: zero the caller's buffer, then let the NIC layer fill it. */
static void aq_ethtool_get_regs(struct net_device *ndev,
				struct ethtool_regs *regs, void *p)
{
	struct aq_nic_s *nic = netdev_priv(ndev);
	u32 count = aq_nic_get_regs_count(nic);

	memset(p, 0, count * sizeof(u32));
	aq_nic_get_regs(nic, regs, p);
}
/* Size in bytes of the register dump returned by aq_ethtool_get_regs(). */
static int aq_ethtool_get_regs_len(struct net_device *ndev)
{
	struct aq_nic_s *nic = netdev_priv(ndev);

	return aq_nic_get_regs_count(nic) * sizeof(u32);
}
/* Report link state; delegates to the generic carrier-based helper. */
static u32 aq_ethtool_get_link(struct net_device *ndev)
{
return ethtool_op_get_link(ndev);
}
/* Legacy ethtool get_settings (struct ethtool_cmd) handler.
 * Fills link settings from the NIC layer, then overrides the speed field
 * with the live link speed, or 0 when the carrier is down.
 */
static int aq_ethtool_get_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
aq_nic_get_link_settings(aq_nic, cmd);
ethtool_cmd_speed_set(cmd, netif_carrier_ok(ndev) ?
aq_nic_get_link_speed(aq_nic) : 0U);
return 0;
}
/* Legacy ethtool set_settings handler: forward to the NIC layer. */
static int aq_ethtool_set_settings(struct net_device *ndev,
				   struct ethtool_cmd *cmd)
{
	return aq_nic_set_link_settings(netdev_priv(ndev), cmd);
}
/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
static const unsigned int aq_ethtool_stat_queue_lines = 5U;
static const unsigned int aq_ethtool_stat_queue_chars =
5U * ETH_GSTRING_LEN;
/* Stat names exposed via ETH_SS_STATS: device-wide counters first, then
 * aq_ethtool_stat_queue_lines entries per queue for all AQ_CFG_VECS_MAX (8)
 * queues.  The order presumably must match the values aq_nic_get_stats()
 * writes -- verify against aq_nic.c before reordering.
 */
static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InPackets",
"InUCast",
"InMCast",
"InBCast",
"InErrors",
"OutPackets",
"OutUCast",
"OutMCast",
"OutBCast",
"InUCastOctects",
"OutUCastOctects",
"InMCastOctects",
"OutMCastOctects",
"InBCastOctects",
"OutBCastOctects",
"InOctects",
"OutOctects",
"InPacketsDma",
"OutPacketsDma",
"InOctetsDma",
"OutOctetsDma",
"InDroppedDma",
"Queue[0] InPackets",
"Queue[0] OutPackets",
"Queue[0] InJumboPackets",
"Queue[0] InLroPackets",
"Queue[0] InErrors",
"Queue[1] InPackets",
"Queue[1] OutPackets",
"Queue[1] InJumboPackets",
"Queue[1] InLroPackets",
"Queue[1] InErrors",
"Queue[2] InPackets",
"Queue[2] OutPackets",
"Queue[2] InJumboPackets",
"Queue[2] InLroPackets",
"Queue[2] InErrors",
"Queue[3] InPackets",
"Queue[3] OutPackets",
"Queue[3] InJumboPackets",
"Queue[3] InLroPackets",
"Queue[3] InErrors",
"Queue[4] InPackets",
"Queue[4] OutPackets",
"Queue[4] InJumboPackets",
"Queue[4] InLroPackets",
"Queue[4] InErrors",
"Queue[5] InPackets",
"Queue[5] OutPackets",
"Queue[5] InJumboPackets",
"Queue[5] InLroPackets",
"Queue[5] InErrors",
"Queue[6] InPackets",
"Queue[6] OutPackets",
"Queue[6] InJumboPackets",
"Queue[6] InLroPackets",
"Queue[6] InErrors",
"Queue[7] InPackets",
"Queue[7] OutPackets",
"Queue[7] InJumboPackets",
"Queue[7] InLroPackets",
"Queue[7] InErrors",
};
/* ethtool -S handler: zero and fill the stats buffer.
 *
 * Bug fix: the ethtool core sizes @data from get_sset_count(), which is
 * reduced by (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines
 * when fewer than the maximum number of vectors are active.  The old code
 * memset() the full ARRAY_SIZE(aq_ethtool_stat_names) worth of u64s and
 * therefore wrote past the end of the allocation whenever
 * cfg->vecs < AQ_CFG_VECS_MAX.  Clear only the reported count.
 */
static void aq_ethtool_stats(struct net_device *ndev,
			     struct ethtool_stats *stats, u64 *data)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);

/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
	BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
	memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) -
			 (AQ_CFG_VECS_MAX - cfg->vecs) *
			 aq_ethtool_stat_queue_lines) * sizeof(u64));
	aq_nic_get_stats(aq_nic, data);
}
/* ethtool -i handler: driver name/version, firmware version, bus info
 * and the stats/regdump sizes.
 *
 * Fix: use strlcpy() instead of strlcat() for the driver and version
 * fields.  strlcat() appends to whatever is already in the buffer, so it
 * only behaves correctly if the caller pre-zeroed it; copying is the
 * intended operation here.
 */
static void aq_ethtool_get_drvinfo(struct net_device *ndev,
				   struct ethtool_drvinfo *drvinfo)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
	struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
	u32 firmware_version = aq_nic_get_fw_version(aq_nic);
	u32 regs_count = aq_nic_get_regs_count(aq_nic);

	strlcpy(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));

	/* Firmware version layout: major in the top byte, minor in the next
	 * byte, build in the low 16 bits.
	 */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u", firmware_version >> 24,
		 (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);

	strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
		sizeof(drvinfo->bus_info));
	/* Report only the stats for the vectors actually in use. */
	drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
		(AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = regs_count;
	drvinfo->eedump_len = 0;
}
/* ETH_SS_STATS string provider.  Copies the stat-name table truncated to
 * the active vector count, mirroring the count logic in
 * aq_ethtool_get_sset_count().
 */
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
if (stringset == ETH_SS_STATS)
memcpy(data, *aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names) -
(AQ_CFG_VECS_MAX - cfg->vecs) *
aq_ethtool_stat_queue_chars);
}
/* Number of strings/stats for a given string set.  Only ETH_SS_STATS is
 * supported; the count excludes the per-queue lines of inactive vectors.
 */
static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
{
	struct aq_nic_s *nic = netdev_priv(ndev);
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);

	if (stringset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ARRAY_SIZE(aq_ethtool_stat_names) -
	       (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
}
/* RSS indirection table size is a compile-time constant for this hw. */
static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
{
return AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
}
/* RSS hash key size in bytes, taken from the cfg's key storage. */
static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
{
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(netdev_priv(ndev));

	return sizeof(cfg->aq_rss.hash_secret_key);
}
/* ethtool -x handler: report hash function, indirection table and key.
 * Each output pointer may be NULL when the caller is not interested.
 */
static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
			      u8 *hfunc)
{
	struct aq_nic_s *nic = netdev_priv(ndev);
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
	unsigned int n;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (indir)
		for (n = 0U; n < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; ++n)
			indir[n] = cfg->aq_rss.indirection_table[n];

	if (key)
		memcpy(key, cfg->aq_rss.hash_secret_key,
		       sizeof(cfg->aq_rss.hash_secret_key));
	return 0;
}
/* ethtool -n handler: only ETHTOOL_GRXRINGS (ring count) is supported. */
static int aq_ethtool_get_rxnfc(struct net_device *ndev,
				struct ethtool_rxnfc *cmd,
				u32 *rule_locs)
{
	struct aq_nic_s *nic = netdev_priv(ndev);
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
	int err = -EOPNOTSUPP;

	if (cmd->cmd == ETHTOOL_GRXRINGS) {
		cmd->data = cfg->vecs;
		err = 0;
	}

	return err;
}
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
.get_regs = aq_ethtool_get_regs,
.get_settings = aq_ethtool_get_settings,
.set_settings = aq_ethtool_set_settings,
.get_drvinfo = aq_ethtool_get_drvinfo,
.get_strings = aq_ethtool_get_strings,
.get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats
};
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_ethtool.h: Declaration of ethertool related functions. */
#ifndef AQ_ETHTOOL_H
#define AQ_ETHTOOL_H
#include "aq_common.h"
extern const struct ethtool_ops aq_ethtool_ops;
#endif /* AQ_ETHTOOL_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_hw.h: Declaraion of abstract interface for NIC hardware specific
* functions.
*/
#ifndef AQ_HW_H
#define AQ_HW_H
#include "aq_common.h"
/* NIC H/W capabilities */
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;        /* netdev feature bits supported by this hw */
u64 link_speed_msk;     /* supported link speeds bitmask */
unsigned int hw_priv_flags;
u32 rxds;               /* max rx descriptors per ring */
u32 txds;               /* max tx descriptors per ring */
u32 txhwb_alignment;
u32 irq_mask;
u32 vecs;               /* max interrupt vectors */
u32 mtu;
u32 mac_regs_count;     /* number of regs in an ethtool register dump */
u8 ports;
u8 msix_irqs;
u8 tcs;
u8 rxd_alignment;
u8 rxd_size;
u8 txd_alignment;
u8 txd_size;
u8 tx_rings;
u8 rx_rings;
bool flow_control;
bool is_64_dma;         /* hw supports 64-bit DMA addressing */
u32 fw_ver_expected;    /* firmware version the driver was built against */
};
/* Link state as reported by the hardware layer. */
struct aq_hw_link_status_s {
unsigned int mbps;      /* current link speed; 0 == link down */
};
/* Interrupt delivery modes. */
#define AQ_HW_IRQ_INVALID 0U
#define AQ_HW_IRQ_LEGACY 1U
#define AQ_HW_IRQ_MSI 2U
#define AQ_HW_IRQ_MSIX 3U
/* PCI power states used by hw_set_power(). */
#define AQ_HW_POWER_STATE_D0 0U
#define AQ_HW_POWER_STATE_D3 3U
/* Flag bits kept in aq_hw_s.header.flags. */
#define AQ_HW_FLAG_STARTED 0x00000004U
#define AQ_HW_FLAG_STOPPING 0x00000008U
#define AQ_HW_FLAG_RESETTING 0x00000010U
#define AQ_HW_FLAG_CLOSING 0x00000020U
#define AQ_HW_LINK_DOWN 0x04000000U
#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_HW_FLAG_ERR_HW 0x80000000U
#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
/* Per-device hardware-layer state. */
struct aq_hw_s {
struct aq_obj_s header;
struct aq_nic_cfg_s *aq_nic_cfg;
struct aq_pci_func_s *aq_pci_func;
void __iomem *mmio;             /* mapped BAR used by aq_hw_read/write_reg */
unsigned int not_ff_addr;       /* register that never reads ~0; used to
				 * detect device unplug in aq_hw_read_reg
				 */
struct aq_hw_link_status_s aq_link_status;
};
struct aq_ring_s;
struct aq_ring_param_s;
struct aq_nic_cfg_s;
struct sk_buff;
/* Abstract interface implemented by each hardware revision (A0, B0).
 * All int-returning ops follow the driver convention of returning 0 on
 * success and a negative errno on failure.
 */
struct aq_hw_ops {
/* Lifecycle: allocate/free the hw object for one port. */
struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
unsigned int port, struct aq_hw_ops *ops);
void (*destroy)(struct aq_hw_s *self);
int (*get_hw_caps)(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps);
/* Datapath: ring fill/drain entry points. */
int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
unsigned int frags);
int (*hw_ring_rx_receive)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
unsigned int sw_tail_old);
int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
/* MAC address and link management. */
int (*hw_get_mac_permanent)(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
int (*hw_get_link_status)(struct aq_hw_s *self,
struct aq_hw_link_status_s *link_status);
int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
/* Device bring-up / teardown. */
int (*hw_reset)(struct aq_hw_s *self);
int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
int (*hw_stop)(struct aq_hw_s *self);
/* Per-ring init/start/stop. */
int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param);
int (*hw_ring_tx_start)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
int (*hw_ring_tx_stop)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
int (*hw_ring_rx_init)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param);
int (*hw_ring_rx_start)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
int (*hw_ring_rx_stop)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
/* Interrupt control. */
int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask);
int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask);
int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask);
/* Filtering, moderation, RSS. */
int (*hw_packet_filter_set)(struct aq_hw_s *self,
unsigned int packet_filter);
int (*hw_multicast_list_set)(struct aq_hw_s *self,
u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
u32 count);
int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
bool itr_enabled);
int (*hw_rss_set)(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
int (*hw_rss_hash_set)(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
/* Diagnostics and power management. */
int (*hw_get_regs)(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
unsigned int *p_count);
int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
int (*hw_deinit)(struct aq_hw_s *self);
int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
};
#endif /* AQ_HW_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_hw_utils.c: Definitions of helper functions used across
* hardware layer.
*/
#include "aq_hw_utils.h"
#include "aq_hw.h"
/* Read-modify-write a bit field: clear the bits in @msk, then OR in
 * @val << @shift.  When the mask covers the whole register, skip the
 * read and write @val directly.  The write is elided if the register
 * value would not change.
 */
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
			 u32 shift, u32 val)
{
	u32 reg_old;
	u32 reg_new;

	if (msk == ~0U) {
		aq_hw_write_reg(aq_hw, addr, val);
		return;
	}

	reg_old = aq_hw_read_reg(aq_hw, addr);
	reg_new = (reg_old & ~msk) | (val << shift);
	if (reg_new != reg_old)
		aq_hw_write_reg(aq_hw, addr, reg_new);
}
/* Extract a bit field: mask the register value, then shift it down. */
u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
{
	u32 reg_val = aq_hw_read_reg(aq_hw, addr);

	return (reg_val & msk) >> shift;
}
/* MMIO register read.  A value of all-ones may mean the device was
 * unplugged; confirm by reading not_ff_addr (a register known never to
 * read as ~0) and, if that also reads ~0, latch the unplug error flag.
 */
u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
{
u32 value = readl(hw->mmio + reg);
if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
return value;
}
/* MMIO register write. */
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
{
writel(value, hw->mmio + reg);
}
/* Convert latched hw error flags to a negative errno: -ENXIO for an
 * unplugged device (checked first), -EIO for a generic hw fault,
 * 0 when no error flag is set.
 */
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
	if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG))
		return -ENXIO;

	if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW))
		return -EIO;

	return 0;
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_hw_utils.h: Declaration of helper functions used across hardware
* layer.
*/
#ifndef AQ_HW_UTILS_H
#define AQ_HW_UTILS_H
#include "aq_common.h"
#ifndef HIDWORD
#define LODWORD(_qw) ((u32)(_qw))
#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff))
#endif
/* NOTE(review): despite the _US_ name this delays in MILLIseconds
 * (mdelay) -- confirm intended units at each call site.
 */
#define AQ_HW_SLEEP(_US_) mdelay(_US_)
/* Busy-wait until condition _B_ becomes true, polling every _US_
 * microseconds for at most _N_ iterations.  On timeout stores -ETIME in
 * the caller's local `err`.
 *
 * Bug fix: this previously assigned positive ETIME, but every caller in
 * this driver checks `if (err < 0)`, so timeouts went undetected.  Kernel
 * convention is negative errno values.
 */
#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
do { \
	unsigned int AQ_HW_WAIT_FOR_i; \
	for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\
	--AQ_HW_WAIT_FOR_i) {\
		udelay(_US_); \
	} \
	if (!AQ_HW_WAIT_FOR_i) {\
		err = -ETIME; \
	} \
} while (0)
struct aq_hw_s;
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val);
u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
#endif /* AQ_HW_UTILS_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_main.c: Main file for aQuantia Linux driver. */
#include "aq_main.h"
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include <linux/netdevice.h>
#include <linux/module.h>
/* PCI IDs claimed by this driver (aQuantia vendor, AQC10x device ids). */
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
{ PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
{}
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
/* Pick the hw-revision ops table for this PCI id: try A0 first, fall
 * back to B0.  Returns NULL if neither revision recognizes the device.
 */
static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
{
	struct aq_hw_ops *ops = hw_atl_a0_get_ops_by_id(pdev);

	return ops ? ops : hw_atl_b0_get_ops_by_id(pdev);
}
/* ndo_open: allocate hot-path resources, then init and start the NIC.
 *
 * Fix: when aq_nic_alloc_hot() itself fails, aq_nic is still NULL at
 * err_exit; the old code passed that NULL straight to aq_nic_deinit().
 * Guard the cleanup so deinit only runs on a successfully allocated nic.
 */
static int aq_ndev_open(struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = NULL;
	int err = 0;

	aq_nic = aq_nic_alloc_hot(ndev);
	if (!aq_nic) {
		err = -ENOMEM;
		goto err_exit;
	}
	err = aq_nic_init(aq_nic);
	if (err < 0)
		goto err_exit;
	err = aq_nic_start(aq_nic);
	if (err < 0)
		goto err_exit;

err_exit:
	if (err < 0 && aq_nic)
		aq_nic_deinit(aq_nic);
	return err;
}
/* ndo_stop: stop the NIC; on success tear down and release hot-path
 * resources.  A failing aq_nic_stop() skips the teardown.
 */
static int aq_ndev_close(struct net_device *ndev)
{
	struct aq_nic_s *nic = netdev_priv(ndev);
	int err = aq_nic_stop(nic);

	if (err < 0)
		return err;

	aq_nic_deinit(nic);
	aq_nic_free_hot_resources(nic);
	return err;
}
/* ndo_start_xmit: hand the skb to the NIC layer and propagate its
 * return value unchanged.
 */
static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct aq_nic_s *nic = netdev_priv(ndev);

	return aq_nic_xmit(nic, skb);
}
/* ndo_change_mtu: validate the new MTU, push it to the NIC layer
 * (adding ETH_HLEN for the frame size), then bounce the interface if it
 * is running so the new size takes effect.
 * 68 is the minimum MTU accepted (the historical IPv4 minimum).
 * NOTE(review): the return values of aq_ndev_close()/aq_ndev_open() in
 * the restart path are ignored; a failed reopen leaves the device down
 * while still reporting success -- confirm this is acceptable.
 */
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
/* No-op when the MTU is unchanged. */
if (new_mtu == ndev->mtu) {
err = 0;
goto err_exit;
}
if (new_mtu < 68) {
err = -EINVAL;
goto err_exit;
}
err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
goto err_exit;
ndev->mtu = new_mtu;
if (netif_running(ndev)) {
aq_ndev_close(ndev);
aq_ndev_open(ndev);
}
err_exit:
return err;
}
/* ndo_set_features: only the LRO bit is acted on.  When the requested
 * LRO state differs from the configured one, record it and restart the
 * interface (if running) to apply it.
 */
static int aq_ndev_set_features(struct net_device *ndev,
				netdev_features_t features)
{
	struct aq_nic_s *nic = netdev_priv(ndev);
	struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
	bool lro_requested;

	if (!(cfg->hw_features & NETIF_F_LRO))
		return 0;

	lro_requested = !!(features & NETIF_F_LRO);
	if (cfg->is_lro == lro_requested)
		return 0;

	cfg->is_lro = lro_requested;
	if (netif_running(ndev)) {
		aq_ndev_close(ndev);
		aq_ndev_open(ndev);
	}

	return 0;
}
/* ndo_set_mac_address: validate/store the address via the generic
 * helper, then program it into the hardware.
 */
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
	struct aq_nic_s *nic = netdev_priv(ndev);
	int err = eth_mac_addr(ndev, addr);

	if (err < 0)
		return err;

	return aq_nic_set_mac(nic, ndev);
}
/* ndo_set_rx_mode: push the packet filter flags, then the multicast
 * list when one exists.  Errors cannot be reported from this hook and
 * are dropped.
 */
static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
	struct aq_nic_s *nic = netdev_priv(ndev);

	if (aq_nic_set_packet_filter(nic, ndev->flags) < 0)
		return;

	if (netdev_mc_count(ndev))
		aq_nic_set_multicast_list(nic, ndev);
}
/* netdev callbacks registered for every aQuantia interface. */
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
.ndo_start_xmit = aq_ndev_start_xmit,
.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features
};
static int aq_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
struct aq_hw_ops *aq_hw_ops = NULL;
struct aq_pci_func_s *aq_pci_func = NULL;
int err = 0;
err = pci_enable_device(pdev);
if (err < 0)
goto err_exit;
aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
&aq_ndev_ops, &aq_ethtool_ops);
if (!aq_pci_func) {
err = -ENOMEM;
goto err_exit;
}
err = aq_pci_func_init(aq_pci_func);
if (err < 0)
goto err_exit;
err_exit:
if (err < 0) {
if (aq_pci_func)
aq_pci_func_free(aq_pci_func);
}
return err;
}
/* PCI remove: deinit the hardware, then free the per-function state. */
static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_pci_func_s *func = pci_get_drvdata(pdev);

	aq_pci_func_deinit(func);
	aq_pci_func_free(func);
}
/* Legacy PM suspend hook: forward the pm message to the function layer. */
static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
{
	struct aq_pci_func_s *func = pci_get_drvdata(pdev);

	return aq_pci_func_change_pm_state(func, &pm_msg);
}
/* Legacy PM resume hook: request a restore-state transition. */
static int aq_pci_resume(struct pci_dev *pdev)
{
	pm_message_t restore_msg = PMSG_RESTORE;
	struct aq_pci_func_s *func = pci_get_drvdata(pdev);

	return aq_pci_func_change_pm_state(func, &restore_msg);
}
/* PCI driver registration block tying the table and hooks together. */
static struct pci_driver aq_pci_ops = {
.name = AQ_CFG_DRV_NAME,
.id_table = aq_pci_tbl,
.probe = aq_pci_probe,
.remove = aq_pci_remove,
.suspend = aq_pci_suspend,
.resume = aq_pci_resume,
};
/* Module entry point: register the PCI driver and return its status. */
static int __init aq_module_init(void)
{
	return pci_register_driver(&aq_pci_ops);
}
/* Module exit point: unregister the PCI driver. */
static void __exit aq_module_exit(void)
{
pci_unregister_driver(&aq_pci_ops);
}
module_init(aq_module_init);
module_exit(aq_module_exit);
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_main.h: Main file for aQuantia Linux driver. */
#ifndef AQ_MAIN_H
#define AQ_MAIN_H
#include "aq_common.h"
#endif /* AQ_MAIN_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_nic.c: Definition of common code for NIC. */
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_nic_internal.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
/* Seed the RSS parameters: copy a fixed Toeplitz key and build an
 * indirection table that spreads entries across @num_rss_queues.
 * The `i & (num_rss_queues - 1)` mapping assumes num_rss_queues is a
 * power of two -- guaranteed by the rounding in aq_nic_cfg_start().
 */
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
struct aq_rss_parameters *rss_params = &cfg->aq_rss;
int i = 0;
/* Fixed well-known RSS hash key (not a secret despite the field name). */
static u8 rss_key[40] = {
0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
};
rss_params->hash_secret_key_size = sizeof(rss_key);
memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
for (i = rss_params->indirection_table_size; i--;)
rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
/* Fills aq_nic_cfg with valid defaults */
static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
cfg->aq_hw_caps = &self->aq_hw_caps;
/* Queue/descriptor defaults from aq_cfg.h; clamped later to the
 * actual hw capabilities by aq_nic_cfg_start().
 */
cfg->vecs = AQ_CFG_VECS_DEF;
cfg->tcs = AQ_CFG_TCS_DEF;
cfg->rxds = AQ_CFG_RXDS_DEF;
cfg->txds = AQ_CFG_TXDS_DEF;
cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
/* Moderation rate is only meaningful when moderation is enabled. */
cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
cfg->itr = cfg->is_interrupt_moderation ?
AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
cfg->flow_control = AQ_CFG_FC_MODE;
cfg->mtu = AQ_CFG_MTU_DEF;
cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
cfg->vlan_id = 0U;
aq_nic_rss_init(self, cfg->num_rss_queues);
}
/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
int aq_nic_cfg_start(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
/*descriptors */
cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
/*rss rings */
cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
cfg->vecs = min(cfg->vecs, num_online_cpus());
/* cfg->vecs should be power of 2 for RSS */
if (cfg->vecs >= 8U)
cfg->vecs = 8U;
else if (cfg->vecs >= 4U)
cfg->vecs = 4U;
else if (cfg->vecs >= 2U)
cfg->vecs = 2U;
else
cfg->vecs = 1U;
cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
/* Legacy interrupts or a single-vector device cannot do RSS:
 * collapse to one vector and disable it.
 */
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
(self->aq_hw_caps.vecs == 1U) ||
(cfg->vecs == 1U)) {
cfg->is_rss = 0U;
cfg->vecs = 1U;
}
/* Restrict advertised speeds and features to what the hw supports. */
cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
cfg->hw_features = self->aq_hw_caps.hw_features;
return 0;
}
/* Periodic service timer: polls link state, refreshes the interrupt
 * moderation setting, aggregates per-vector ring stats into
 * ndev->stats, then rearms itself.  Runs every
 * AQ_CFG_SERVICE_TIMER_INTERVAL jiffies.
 */
static void aq_nic_service_timer_cb(unsigned long param)
{
struct aq_nic_s *self = (struct aq_nic_s *)param;
struct net_device *ndev = aq_nic_get_ndev(self);
int err = 0;
bool is_busy = false;
unsigned int i = 0U;
struct aq_hw_link_status_s link_status;
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
/* Mark the nic busy for the duration of the callback; the matching
 * decrement is on every exit path via is_busy.
 */
atomic_inc(&self->header.busy_count);
is_busy = true;
if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
goto err_exit;
err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
if (err < 0)
goto err_exit;
self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
self->aq_nic_cfg.is_interrupt_moderation);
/* Link state transition: update flags and carrier only on change. */
if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
if (link_status.mbps) {
aq_utils_obj_set(&self->header.flags,
AQ_NIC_FLAG_STARTED);
aq_utils_obj_clear(&self->header.flags,
AQ_NIC_LINK_DOWN);
netif_carrier_on(self->ndev);
} else {
netif_carrier_off(self->ndev);
aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
}
self->link_status = link_status;
}
/* Sum ring stats across all vectors into the netdev counters. */
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
for (i = AQ_DIMOF(self->aq_vec); i--;) {
if (self->aq_vec[i])
aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
}
ndev->stats.rx_packets = stats_rx.packets;
ndev->stats.rx_bytes = stats_rx.bytes;
ndev->stats.rx_errors = stats_rx.errors;
ndev->stats.tx_packets = stats_tx.packets;
ndev->stats.tx_bytes = stats_tx.bytes;
ndev->stats.tx_errors = stats_tx.errors;
err_exit:
if (is_busy)
atomic_dec(&self->header.busy_count);
/* Rearm unconditionally, even after an early error exit. */
mod_timer(&self->service_timer,
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
}
/* Polling-mode service routine: invoke every vector's ISR by hand
 * instead of relying on hardware interrupts, then re-arm the timer.
 * @param: timer cookie, the owning struct aq_nic_s.
 */
static void aq_nic_polling_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
	unsigned int i;

	/* Index the array directly: the previous comma-expression loop
	 * idiom fetched aq_vec[i] one step past the last used entry,
	 * which reads out of bounds when aq_vecs == AQ_CFG_VECS_MAX.
	 */
	for (i = 0U; i < self->aq_vecs; ++i)
		aq_vec_isr(i, (void *)self->aq_vec[i]);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}
/* Allocate a multiqueue net_device sized for the worst-case vector
 * count, with struct aq_nic_s as the netdev private area.
 */
static struct net_device *aq_nic_ndev_alloc(void)
{
	return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
}
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
const struct ethtool_ops *et_ops,
struct device *dev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops)
{
struct net_device *ndev = NULL;
struct aq_nic_s *self = NULL;
int err = 0;
ndev = aq_nic_ndev_alloc();
self = netdev_priv(ndev);
if (!self) {
err = -EINVAL;
goto err_exit;
}
ndev->netdev_ops = ndev_ops;
ndev->ethtool_ops = et_ops;
SET_NETDEV_DEV(ndev, dev);
ndev->if_port = port;
self->ndev = ndev;
self->aq_pci_func = aq_pci_func;
self->aq_hw_ops = *aq_hw_ops;
self->port = (u8)port;
self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
&self->aq_hw_ops);
err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
if (err < 0)
goto err_exit;
aq_nic_cfg_init_defaults(self);
err_exit:
if (err < 0) {
aq_nic_free_hot_resources(self);
self = NULL;
}
return self;
}
/* Read the permanent MAC address from hardware (optionally overridden
 * at build time), register the net_device, and leave carrier off with
 * all TX subqueues stopped until the service timer observes a link.
 * Returns 0 or a negative errno.
 */
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;
	unsigned int i = 0U;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}
	err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
			self->aq_nic_cfg.aq_hw_caps,
			self->ndev->dev_addr);
	if (err < 0)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	/* bring-up/debug hook: force a fixed MAC address at build time */
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif
	err = register_netdev(self->ndev);
	if (err < 0)
		goto err_exit;

	self->is_ndev_registered = true;
	netif_carrier_off(self->ndev);

	for (i = AQ_CFG_VECS_MAX; i--;)
		aq_nic_ndev_queue_stop(self, i);

err_exit:
	return err;
}
int aq_nic_ndev_init(struct aq_nic_s *self)
{
struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
self->ndev->hw_features |= aq_hw_caps->hw_features;
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
return 0;
}
/* Release the net_device and the hardware object: unregister if it was
 * registered, destroy the hw instance if one was created, then free the
 * netdev (which also frees the embedded aq_nic_s).
 */
void aq_nic_ndev_free(struct aq_nic_s *self)
{
	if (!self->ndev)
		return;

	if (self->is_ndev_registered)
		unregister_netdev(self->ndev);

	if (self->aq_hw)
		self->aq_hw_ops.destroy(self->aq_hw);

	free_netdev(self->ndev);
}
/* "Hot" allocation stage: create the per-interrupt vector objects for
 * an existing net_device/aq_nic_s pair (open/resume path).  All vectors
 * already allocated are freed again if any single allocation fails.
 * Returns the NIC object or NULL on failure.
 */
struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
{
	struct aq_nic_s *self = NULL;
	int err = 0;

	if (!ndev) {
		err = -EINVAL;
		goto err_exit;
	}
	self = netdev_priv(ndev);
	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	if (netif_running(ndev)) {
		unsigned int i;

		/* quiesce TX while the vectors are (re)built */
		for (i = AQ_CFG_VECS_MAX; i--;)
			netif_stop_subqueue(ndev, i);
	}

	/* aq_vecs counts successfully allocated vectors, so the error
	 * path below knows exactly how many to free
	 */
	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
		self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

err_exit:
	if (err < 0) {
		aq_nic_free_hot_resources(self);
		self = NULL;
	}
	return self;
}
/* Record the TX ring serving slot idx; idx is computed via
 * AQ_NIC_TCVEC2RING() and looked up again in aq_nic_xmit().
 */
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}
/* Return the parent (PCI) device, e.g. for DMA mapping calls. */
struct device *aq_nic_get_dev(struct aq_nic_s *self)
{
	return self->ndev->dev.parent;
}
/* Return the net_device this NIC object backs. */
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}
/* Initialize the hardware for operation: reset it, program the base
 * configuration and MAC address, then initialize every vector.
 * Returns 0 or a negative errno.
 */
int aq_nic_init(struct aq_nic_s *self)
{
	int err = 0;
	unsigned int i;

	self->power_state = AQ_HW_POWER_STATE_D0;
	err = self->aq_hw_ops.hw_reset(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
				      aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	/* Index the array directly: the previous comma-expression loop
	 * idiom read aq_vec[i] one element past the last used entry.
	 */
	for (i = 0U; i < self->aq_vecs; ++i)
		aq_vec_init(self->aq_vec[i], &self->aq_hw_ops, self->aq_hw);

err_exit:
	return err;
}
/* Wake the TX subqueue that backs vector idx. */
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
{
	netif_start_subqueue(self->ndev, idx);
}
/* Stop the TX subqueue that backs vector idx. */
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
{
	netif_stop_subqueue(self->ndev, idx);
}
/* Bring the datapath up: program the filters, start every vector and
 * the hardware, arm the service timer and either the polling timer or
 * real per-vector IRQs, then open the TX queues.
 * Returns 0 or a negative errno; no unwind is performed here, the
 * caller is expected to stop/deinit on failure.
 */
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
						   self->packet_filter);
	if (err < 0)
		goto err_exit;

	/* NOTE(review): this loop idiom fetches aq_vec[i] once more after
	 * the final iteration; out of bounds if aq_vecs == AQ_CFG_VECS_MAX
	 */
	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops.hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
			self->aq_nic_cfg.is_interrupt_moderation);
	if (err < 0)
		goto err_exit;

	/* link/stats polling runs in both interrupt and polling modes */
	setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
		    (unsigned long)self);
	mod_timer(&self->service_timer, jiffies +
		  AQ_CFG_SERVICE_TIMER_INTERVAL);

	if (self->aq_nic_cfg.is_polling) {
		/* no IRQs: emulate the vector ISRs from a timer */
		setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
			    (unsigned long)self);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
						    self->ndev->name, aq_vec,
					aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
				AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_start(self, i);

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}
/* Fill TX descriptor buffers for a (non-context) skb: one entry for the
 * linear head, then one entry per page fragment, splitting fragments
 * larger than AQ_CFG_TX_FRAME_MAX into several entries.  The first
 * entry is marked SOP, the last EOP, and the skb is stashed on the EOP
 * entry so aq_ring_tx_clean() can free it.
 * Returns the number of entries written.
 * NOTE(review): the dma_map_single()/skb_frag_dma_map() results are
 * never checked with dma_mapping_error() -- confirm and fix upstream.
 */
static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self,
					struct sk_buff *skb,
					struct aq_ring_buff_s *dx)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;

	/* linear part of the skb */
	dx->flags = 0U;
	dx->len = skb_headlen(skb);
	dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len,
				DMA_TO_DEVICE);
	dx->len_pkt = skb->len;
	dx->is_sop = 1U;
	dx->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request hardware checksum offload per protocol */
		dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U;
		dx->is_tcp_cso =
			(ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
		dx->is_udp_cso =
			(ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);
		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
					   frag_len, DMA_TO_DEVICE);

		/* chop oversized fragments into MAX-sized descriptors */
		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
			++dx;
			++ret;
			dx->flags = 0U;
			dx->len = AQ_CFG_TX_FRAME_MAX;
			dx->pa = frag_pa;
			dx->is_mapped = 1U;

			frag_len -= AQ_CFG_TX_FRAME_MAX;
			frag_pa += AQ_CFG_TX_FRAME_MAX;
		}

		++dx;
		++ret;
		dx->flags = 0U;
		dx->len = frag_len;
		dx->pa = frag_pa;
		dx->is_mapped = 1U;
	}

	/* last descriptor carries the end-of-packet flag and the skb */
	dx->is_eop = 1U;
	dx->skb = skb;

	return ret;
}
/* Build the single LSO context descriptor (layer 2/3/4 header lengths
 * and MSS) that precedes the data descriptors of a GSO packet.
 * Always returns 1: one descriptor consumed.
 */
static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self,
				       struct sk_buff *skb,
				       struct aq_ring_buff_s *dx)
{
	dx->flags = 0U;
	dx->len_pkt = skb->len;
	dx->len_l2 = ETH_HLEN;
	dx->len_l3 = ip_hdrlen(skb);
	dx->len_l4 = tcp_hdrlen(skb);
	dx->mss = skb_shinfo(skb)->gso_size;
	dx->is_txc = 1U;
	return 1U;
}
/* Map an skb to TX ring buffer entries, prefixing GSO frames with an
 * LSO context descriptor.  Returns the total number of entries used.
 */
static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
				   struct aq_ring_buff_s *dx)
{
	unsigned int count = 0U;

	/* GSO frames need a leading context descriptor before the data */
	if (unlikely(skb_is_gso(skb))) {
		count = aq_nic_map_skb_lso(self, skb, dx);
		++dx;
	}

	return count + aq_nic_map_skb_frag(self, skb, dx);
}
/* Transmit entry point (ndo_start_xmit backend).  Maps the skb onto
 * ring descriptors under a bounded trylock of the ring lock, kicks the
 * hardware and stops the queue when the ring is nearly full.
 * Returns a negative errno, NETDEV_TX_BUSY, or >= 0 on success.
 */
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
__releases(&ring->lock)
__acquires(&ring->lock)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	unsigned int trys = AQ_CFG_LOCK_TRYS;
	int err = 0;
	bool is_nic_in_bad_state;
	bool is_busy = false;
	struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX];

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	atomic_inc(&self->header.busy_count);
	is_busy = true;

	/* fragment list too long for the on-stack mapping array: drop */
	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	/* refuse to queue while the nic is resetting/link-down, or when
	 * the ring cannot hold a worst-case fragment count
	 */
	is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
						AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
						(aq_ring_avail_dx(ring) <
						AQ_CFG_SKB_FRAGS_MAX);

	if (is_nic_in_bad_state) {
		aq_nic_ndev_queue_stop(self, ring->idx);
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* bounded retry on the ring lock (shared with the tx-clean
	 * path) rather than spinning indefinitely in softirq context
	 */
	do {
		if (spin_trylock(&ring->header.lock)) {
			frags = aq_nic_map_skb(self, skb, &buffers[0]);

			aq_ring_tx_append_buffs(ring, &buffers[0], frags);

			err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
							      ring, frags);
			if (err >= 0) {
				/* stop early so the next worst-case skb
				 * still fits when the queue wakes up
				 */
				if (aq_ring_avail_dx(ring) <
				    AQ_CFG_SKB_FRAGS_MAX + 1)
					aq_nic_ndev_queue_stop(self, ring->idx);
			}
			spin_unlock(&ring->header.lock);

			if (err >= 0) {
				++ring->stats.tx.packets;
				ring->stats.tx.bytes += skb->len;
			}
			break;
		}
	} while (--trys);

	if (!trys) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

err_exit:
	if (is_busy)
		atomic_dec(&self->header.busy_count);
	return err;
}
/* Program the RX packet filter bits into hardware and, on success,
 * cache them for the next aq_nic_start().  Returns the hw op status.
 */
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);

	if (err >= 0)
		self->packet_filter = flags;

	return err;
}
/* Copy the net_device multicast list into the driver shadow table and
 * program it into hardware.  Returns the hw op status.
 */
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0U;

	netdev_for_each_mc_addr(ha, ndev) {
		/* fix: stop at the shadow table size -- the unbounded copy
		 * overran mc_list.ar[] when the stack had more than
		 * AQ_CFG_MULTICAST_ADDRESS_MAX multicast addresses
		 */
		if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
			break;
		ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		++self->mc_list.count;
	}

	return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
}
/* Validate and record a new MTU (applied on the next init/start).
 * Returns 0 or -EINVAL when the hardware cannot support the size.
 * NOTE(review): cfg->mtu elsewhere appears to include ETH_HLEN (see
 * aq_nic_ndev_init) while new_mtu is stored as-is -- confirm callers.
 */
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	if (new_mtu > self->aq_hw_caps.mtu)
		return -EINVAL;

	self->aq_nic_cfg.mtu = new_mtu;
	return 0;
}
/* Program ndev->dev_addr into the hardware MAC address registers. */
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}
/* Link speed in Mbps as last seen by the service timer (0 == down). */
unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}
/* ethtool register dump: delegate to the hw layer, which fills p with
 * aq_nic_get_regs_count() u32 values.  Returns the hw op status.
 */
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;

	/* single dump-format version for this driver */
	regs->version = 1;

	return self->aq_hw_ops.hw_get_regs(self->aq_hw,
					   &self->aq_hw_caps, regs_buff);
}
/* Number of u32 registers hw_get_regs() will dump, for ethtool. */
int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_hw_caps.mac_regs_count;
}
/* Fill an ethtool statistics array: hardware counters first, then the
 * software counters of every vector appended after them.
 * NOTE(review): if hw_get_hw_stats() fails the array is left untouched
 * and no error reaches the caller -- confirm this is intentional.
 */
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	unsigned int count = 0U;
	int err = 0;

	err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
	if (err < 0)
		goto err_exit;

	data += count;
	count = 0U;
	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		/* advance past whatever the previous vector wrote */
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
	(void)err;
}
/* Report link settings for the legacy ethtool get_settings interface:
 * supported == hardware capability mask, advertising == current
 * configuration mask, both translated from AQ_NIC_RATE_* bits.
 */
void aq_nic_get_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd)
{
	cmd->port = PORT_TP;
	cmd->transceiver = XCVR_EXTERNAL;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = self->aq_nic_cfg.is_autoneg;

	/* what the hardware can do */
	cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ?
				ADVERTISED_10000baseT_Full : 0U;
	cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ?
				ADVERTISED_1000baseT_Full : 0U;
	cmd->supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ?
				ADVERTISED_100baseT_Full : 0U;
	cmd->supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0;
	cmd->supported |= SUPPORTED_Autoneg;

	/* what the current configuration allows */
	cmd->advertising = (self->aq_nic_cfg.is_autoneg) ?
				ADVERTISED_Autoneg : 0U;
	cmd->advertising |=
			(self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ?
			ADVERTISED_10000baseT_Full : 0U;
	cmd->advertising |=
			(self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ?
			ADVERTISED_1000baseT_Full : 0U;
	cmd->advertising |=
			(self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ?
			ADVERTISED_100baseT_Full : 0U;
	cmd->advertising |= (self->aq_nic_cfg.flow_control) ?
				ADVERTISED_Pause : 0U;
}
int aq_nic_set_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd)
{
u32 speed = 0U;
u32 rate = 0U;
int err = 0;
if (cmd->autoneg == AUTONEG_ENABLE) {
rate = self->aq_hw_caps.link_speed_msk;
self->aq_nic_cfg.is_autoneg = true;
} else {
speed = ethtool_cmd_speed(cmd);
switch (speed) {
case SPEED_100:
rate = AQ_NIC_RATE_100M;
break;
case SPEED_1000:
rate = AQ_NIC_RATE_1G;
break;
case SPEED_2500:
rate = AQ_NIC_RATE_2GS;
break;
case SPEED_5000:
rate = AQ_NIC_RATE_5G;
break;
case SPEED_10000:
rate = AQ_NIC_RATE_10G;
break;
default:
err = -1;
goto err_exit;
break;
}
if (!(self->aq_hw_caps.link_speed_msk & rate)) {
err = -1;
goto err_exit;
}
self->aq_nic_cfg.is_autoneg = false;
}
err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
if (err < 0)
goto err_exit;
self->aq_nic_cfg.link_speed_msk = rate;
err_exit:
return err;
}
/* Expose the runtime configuration to other driver layers. */
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}
/* Query the firmware version word from hardware; the value layout is
 * defined by the hw layer.  Returns 0 if the op does not fill it.
 */
u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);
	return fw_version;
}
/* Tear the datapath down in reverse order of aq_nic_start(): stop the
 * TX queues and the service timer, mask interrupts, release the
 * polling timer or the IRQs, stop every vector and finally the
 * hardware.  Returns the hw_stop() status.
 * NOTE(review): the comma-expression loops fetch aq_vec[i] once past
 * the last used entry; out of bounds if aq_vecs == AQ_CFG_VECS_MAX.
 */
int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_stop(self, i);

	del_timer_sync(&self->service_timer);

	self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self->aq_pci_func);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops.hw_stop(self->aq_hw);
}
/* Deinitialize the hardware side of the NIC: deinit every vector, then
 * either deinit the hw (normal D0 shutdown) or program the requested
 * low-power state (suspend path).  NULL-safe.
 */
void aq_nic_deinit(struct aq_nic_s *self)
{
	unsigned int i;

	if (!self)
		return;

	/* Index the array directly: the previous comma-expression loop
	 * idiom read aq_vec[i] one element past the last used entry.
	 */
	for (i = 0U; i < self->aq_vecs; ++i)
		aq_vec_deinit(self->aq_vec[i]);

	if (self->power_state == AQ_HW_POWER_STATE_D0)
		(void)self->aq_hw_ops.hw_deinit(self->aq_hw);
	else
		(void)self->aq_hw_ops.hw_set_power(self->aq_hw,
						   self->power_state);
}
void aq_nic_free_hot_resources(struct aq_nic_s *self)
{
unsigned int i = 0U;
if (!self)
goto err_exit;
for (i = AQ_DIMOF(self->aq_vec); i--;) {
if (self->aq_vec[i])
aq_vec_free(self->aq_vec[i]);
}
err_exit:;
}
/* Suspend/resume handler: on a sleep/freeze event detach the netdev
 * and stop/deinit the datapath; otherwise re-init and restart it.
 * A NIC whose netdev is not running needs no transition work.
 * Returns 0 or a negative errno.
 */
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev))
		goto out;

	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_unlock;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_unlock;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_unlock;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_unlock:
	/* fix: the error paths previously jumped straight to return while
	 * still holding the RTNL lock, deadlocking the next rtnl_lock()
	 */
	rtnl_unlock();

out:
	return err;
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_nic.h: Declaration of common code for NIC. */
#ifndef AQ_NIC_H
#define AQ_NIC_H
#include "aq_common.h"
#include "aq_rss.h"
struct aq_ring_s;
struct aq_pci_func_s;
struct aq_hw_ops;
#define AQ_NIC_FC_OFF 0U
#define AQ_NIC_FC_TX 1U
#define AQ_NIC_FC_RX 2U
#define AQ_NIC_FC_FULL 3U
#define AQ_NIC_FC_AUTO 4U
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
#define AQ_NIC_RATE_5GSR BIT(2)
#define AQ_NIC_RATE_2GS BIT(3)
#define AQ_NIC_RATE_1G BIT(4)
#define AQ_NIC_RATE_100M BIT(5)
/* Runtime configuration of one NIC port.  Defaults come from
 * aq_nic_cfg_init_defaults() and are clamped against hardware
 * capabilities by aq_nic_cfg_start().
 */
struct aq_nic_cfg_s {
	struct aq_hw_caps_s *aq_hw_caps;
	u64 hw_features;
	u32 rxds;		/* rx ring size, descriptors # */
	u32 txds;		/* tx ring size, descriptors # */
	u32 vecs;		/* vecs==allocated irqs */
	u32 irq_type;		/* AQ_HW_IRQ_* mode actually granted */
	u32 itr;		/* interrupt moderation rate; 0 == off */
	u32 num_rss_queues;
	u32 mtu;		/* includes ETH_HLEN (see aq_nic_ndev_init) */
	u32 ucp_0x364;		/* firmware register value -- TODO confirm semantics */
	u32 flow_control;	/* AQ_NIC_FC_* */
	u32 link_speed_msk;	/* allowed AQ_NIC_RATE_* bits */
	u32 vlan_id;
	u16 is_mc_list_enabled;
	u16 mc_list_count;
	bool is_autoneg;
	bool is_interrupt_moderation;
	bool is_polling;	/* poll vectors from a timer instead of IRQs */
	bool is_rss;
	bool is_lro;
	u8 tcs;			/* traffic classes */
	struct aq_rss_parameters aq_rss;
};
#define AQ_NIC_FLAG_STARTED 0x00000004U
#define AQ_NIC_FLAG_STOPPING 0x00000008U
#define AQ_NIC_FLAG_RESETTING 0x00000010U
#define AQ_NIC_FLAG_CLOSING 0x00000020U
#define AQ_NIC_LINK_DOWN 0x04000000U
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
const struct ethtool_ops *et_ops,
struct device *dev,
struct aq_pci_func_s *aq_pci_func,
unsigned int port,
const struct aq_hw_ops *aq_hw_ops);
int aq_nic_ndev_init(struct aq_nic_s *self);
struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
struct aq_ring_s *ring);
struct device *aq_nic_get_dev(struct aq_nic_s *self);
struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
int aq_nic_init(struct aq_nic_s *self);
int aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
int aq_nic_stop(struct aq_nic_s *self);
void aq_nic_deinit(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev);
unsigned int aq_nic_get_link_speed(struct aq_nic_s *self);
void aq_nic_get_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd);
int aq_nic_set_link_settings(struct aq_nic_s *self, struct ethtool_cmd *cmd);
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
#endif /* AQ_NIC_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_nic_internal.h: Definition of private object structure. */
#ifndef AQ_NIC_INTERNAL_H
#define AQ_NIC_INTERNAL_H
/* Private per-port state; lives in the netdev_priv() area of ndev. */
struct aq_nic_s {
	struct aq_obj_s header;		/* flags + busy_count */
	struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
	struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
	struct aq_hw_s *aq_hw;
	struct net_device *ndev;
	struct aq_pci_func_s *aq_pci_func;
	unsigned int aq_vecs;		/* entries used in aq_vec[] */
	unsigned int packet_filter;
	unsigned int power_state;	/* AQ_HW_POWER_STATE_* */
	bool is_ndev_registered;
	u8 port;
	struct aq_hw_ops aq_hw_ops;	/* copied by value, not referenced */
	struct aq_hw_caps_s aq_hw_caps;
	struct aq_nic_cfg_s aq_nic_cfg;
	struct timer_list service_timer;	/* link/stats poll */
	struct timer_list polling_timer;	/* ISR emulation (is_polling) */
	struct aq_hw_link_status_s link_status;
	struct {
		u32 count;
		u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
	} mc_list;			/* multicast filter shadow table */
};
#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
AQ_NIC_LINK_DOWN)
#endif /* AQ_NIC_INTERNAL_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_pci_func.c: Definition of PCI functions. */
#include "aq_pci_func.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include <linux/interrupt.h>
/* Per-PCI-function state: MMIO mapping, interrupt bookkeeping and the
 * NIC objects of every port the function exposes.
 */
struct aq_pci_func_s {
	struct pci_dev *pdev;
	struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
	void __iomem *mmio;		/* mapped register BAR */
	void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];	/* per-IRQ cookies */
	resource_size_t mmio_pa;	/* physical address of the BAR */
	unsigned int msix_entry_mask;	/* bit i set == IRQ i requested */
	unsigned int irq_type;		/* AQ_HW_IRQ_* actually granted */
	unsigned int ports;
	bool is_pci_enabled;		/* teardown flags for deinit */
	bool is_regions;
	bool is_pci_using_dac;		/* 64-bit DMA mask in use */
	struct aq_hw_caps_s aq_hw_caps;
	struct msix_entry msix_entry[AQ_CFG_PCI_FUNC_MSIX_IRQS];
};
/* Allocate the per-PCI-function container and a cold NIC object for
 * each port the hardware reports.  Everything allocated so far is torn
 * down again via aq_pci_func_free() on any failure.
 * Returns the new object or NULL.
 */
struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
					struct pci_dev *pdev,
					const struct net_device_ops *ndev_ops,
					const struct ethtool_ops *eth_ops)
{
	struct aq_pci_func_s *self = NULL;
	int err = 0;
	unsigned int port = 0U;

	if (!aq_hw_ops) {
		err = -EFAULT;
		goto err_exit;
	}
	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}
	pci_set_drvdata(pdev, self);
	self->pdev = pdev;

	/* capabilities can be queried without a hw instance (NULL hw) */
	err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
	if (err < 0)
		goto err_exit;

	self->ports = self->aq_hw_caps.ports;

	for (port = 0; port < self->ports; ++port) {
		struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
							    &pdev->dev, self,
							    port, aq_hw_ops);

		if (!aq_nic) {
			err = -ENOMEM;
			goto err_exit;
		}
		self->port[port] = aq_nic;
	}

err_exit:
	if (err < 0) {
		if (self)
			aq_pci_func_free(self);
		self = NULL;
	}

	(void)err;
	return self;
}
int aq_pci_func_init(struct aq_pci_func_s *self)
{
int err = 0;
unsigned int bar = 0U;
unsigned int port = 0U;
unsigned int i = 0U;
err = pci_enable_device(self->pdev);
if (err < 0)
goto err_exit;
self->is_pci_enabled = true;
err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
if (!err) {
err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
self->is_pci_using_dac = 1;
}
if (err) {
err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(self->pdev,
DMA_BIT_MASK(32));
self->is_pci_using_dac = 0;
}
if (err != 0) {
err = -ENOSR;
goto err_exit;
}
err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
if (err < 0)
goto err_exit;
self->is_regions = true;
pci_set_master(self->pdev);
for (bar = 0; bar < 4; ++bar) {
if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
resource_size_t reg_sz;
self->mmio_pa = pci_resource_start(self->pdev, bar);
if (self->mmio_pa == 0U) {
err = -EIO;
goto err_exit;
}
reg_sz = pci_resource_len(self->pdev, bar);
if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
err = -EIO;
goto err_exit;
}
self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
if (!self->mmio) {
err = -EIO;
goto err_exit;
}
break;
}
}
if (err < 0)
goto err_exit;
for (i = 0; i < self->aq_hw_caps.msix_irqs; i++)
self->msix_entry[i].entry = i;
/*enable interrupts */
#if AQ_CFG_FORCE_LEGACY_INT
self->irq_type = AQ_HW_IRQ_LEGACY;
#else
err = pci_enable_msix(self->pdev, self->msix_entry,
self->aq_hw_caps.msix_irqs);
if (err >= 0) {
self->irq_type = AQ_HW_IRQ_MSIX;
} else {
err = pci_enable_msi(self->pdev);
if (err >= 0) {
self->irq_type = AQ_HW_IRQ_MSI;
} else {
self->irq_type = AQ_HW_IRQ_LEGACY;
err = 0;
}
}
#endif
/* net device init */
for (port = 0; port < self->ports; ++port) {
if (!self->port[port])
continue;
err = aq_nic_cfg_start(self->port[port]);
if (err < 0)
goto err_exit;
err = aq_nic_ndev_init(self->port[port]);
if (err < 0)
goto err_exit;
err = aq_nic_ndev_register(self->port[port]);
if (err < 0)
goto err_exit;
}
err_exit:
if (err < 0)
aq_pci_func_deinit(self);
return err;
}
/* Attach vector i to its interrupt source according to the granted
 * interrupt mode.  On success the vector is recorded in
 * msix_entry_mask/aq_vec[] for aq_pci_func_free_irqs(), and for MSI-X
 * an affinity hint is installed.
 * Returns the request_irq() status or -EFAULT for an unknown mode.
 */
int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
			  char *name, void *aq_vec, cpumask_t *affinity_mask)
{
	int err = 0;

	switch (self->irq_type) {
	case AQ_HW_IRQ_MSIX:
		err = request_irq(self->msix_entry[i].vector, aq_vec_isr, 0,
				  name, aq_vec);
		break;

	case AQ_HW_IRQ_MSI:
		err = request_irq(self->pdev->irq, aq_vec_isr, 0, name, aq_vec);
		break;

	case AQ_HW_IRQ_LEGACY:
		/* legacy INTx may be shared with other devices */
		err = request_irq(self->pdev->irq, aq_vec_isr_legacy,
				  IRQF_SHARED, name, aq_vec);
		break;

	default:
		err = -EFAULT;
		break;
	}

	if (err >= 0) {
		self->msix_entry_mask |= (1 << i);
		self->aq_vec[i] = aq_vec;

		if (self->irq_type == AQ_HW_IRQ_MSIX)
			irq_set_affinity_hint(self->msix_entry[i].vector,
					      affinity_mask);
	}

	return err;
}
/* Release every IRQ recorded in msix_entry_mask, undoing
 * aq_pci_func_alloc_irq() (including the MSI-X affinity hints).
 */
void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
{
	unsigned int i;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;

		switch (self->irq_type) {
		case AQ_HW_IRQ_MSIX:
			irq_set_affinity_hint(self->msix_entry[i].vector, NULL);
			free_irq(self->msix_entry[i].vector, self->aq_vec[i]);
			break;
		case AQ_HW_IRQ_MSI:
		case AQ_HW_IRQ_LEGACY:
			/* both modes share the function's single vector */
			free_irq(self->pdev->irq, self->aq_vec[i]);
			break;
		default:
			break;
		}

		self->msix_entry_mask &= ~(1U << i);
	}
}
/* Return the mapped register BAR for the hw register accessors. */
void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
{
	return self->mmio;
}
/* Return the AQ_HW_IRQ_* mode granted during aq_pci_func_init(). */
unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
{
	return self->irq_type;
}
/* Undo aq_pci_func_init(): free IRQs, disable MSI/MSI-X, release the
 * regions and disable the device.  NULL-safe, and each teardown step
 * is guarded by the flag set during init.
 */
void aq_pci_func_deinit(struct aq_pci_func_s *self)
{
	if (!self)
		return;

	aq_pci_func_free_irqs(self);

	switch (self->irq_type) {
	case AQ_HW_IRQ_MSI:
		pci_disable_msi(self->pdev);
		break;
	case AQ_HW_IRQ_MSIX:
		pci_disable_msix(self->pdev);
		break;
	default:
		/* legacy INTx needs no teardown */
		break;
	}

	if (self->is_regions)
		pci_release_regions(self->pdev);

	if (self->is_pci_enabled)
		pci_disable_device(self->pdev);
}
/* Free every port's net_device and then the function container itself.
 * NULL-safe.
 */
void aq_pci_func_free(struct aq_pci_func_s *self)
{
	unsigned int port;

	if (!self)
		return;

	for (port = 0; port < self->ports; ++port) {
		if (self->port[port])
			aq_nic_ndev_free(self->port[port]);
	}

	kfree(self);
}
/* Forward a suspend/resume event to every port; per-port failures are
 * deliberately ignored.  Returns 0, or -EFAULT for a NULL self.
 */
int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
				pm_message_t *pm_msg)
{
	unsigned int port;

	if (!self)
		return -EFAULT;

	for (port = 0; port < self->ports; ++port) {
		if (self->port[port])
			(void)aq_nic_change_pm_state(self->port[port], pm_msg);
	}

	return 0;
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_pci_func.h: Declaration of PCI functions. */
#ifndef AQ_PCI_FUNC_H
#define AQ_PCI_FUNC_H
#include "aq_common.h"
struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
struct pci_dev *pdev,
const struct net_device_ops *ndev_ops,
const struct ethtool_ops *eth_ops);
int aq_pci_func_init(struct aq_pci_func_s *self);
int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
char *name, void *aq_vec,
cpumask_t *affinity_mask);
void aq_pci_func_free_irqs(struct aq_pci_func_s *self);
int aq_pci_func_start(struct aq_pci_func_s *self);
void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self);
unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self);
void aq_pci_func_deinit(struct aq_pci_func_s *self);
void aq_pci_func_free(struct aq_pci_func_s *self);
int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
pm_message_t *pm_msg);
#endif /* AQ_PCI_FUNC_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
/* Common second-stage ring allocation: the software buffer ring plus
 * the DMA-coherent descriptor area.  self->size and self->dx_size must
 * already be set by the caller.  On failure the partially built ring
 * is freed and NULL is returned.
 */
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}
/* Size a TX ring from the NIC configuration and allocate its backing
 * storage.  Returns self or NULL on allocation failure.
 * NOTE(review): on the failure path aq_ring_free() is invoked with
 * self == NULL -- confirm it tolerates a NULL ring.
 */
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}
/* Size an RX ring from the NIC configuration and allocate its backing
 * storage.  Returns self or NULL on allocation failure.
 * NOTE(review): on the failure path aq_ring_free() is invoked with
 * self == NULL -- confirm it tolerates a NULL ring.
 */
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}
/* Reset head/tail indices so the ring starts out empty.  Returns 0. */
int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	return 0;
}
/* Copy `buffers` mapped TX entries into the software ring at sw_tail,
 * wrapping around the end of the ring when necessary.
 * NOTE(review): sw_tail itself is not advanced here -- presumably a
 * later step (hw_ring_tx_xmit) updates it; confirm against the hw ops.
 */
void aq_ring_tx_append_buffs(struct aq_ring_s *self,
			     struct aq_ring_buff_s *buffer,
			     unsigned int buffers)
{
	if (likely(self->sw_tail + buffers < self->size)) {
		/* fast path: the whole batch fits before the wrap point */
		memcpy(&self->buff_ring[self->sw_tail], buffer,
		       sizeof(buffer[0]) * buffers);
	} else {
		/* split the copy at the end of the ring */
		unsigned int first_part = self->size - self->sw_tail;
		unsigned int second_part = buffers - first_part;

		memcpy(&self->buff_ring[self->sw_tail], buffer,
		       sizeof(buffer[0]) * first_part);

		memcpy(&self->buff_ring[0], &buffer[first_part],
		       sizeof(buffer[0]) * second_part);
	}
}
/* Release completed TX entries between sw_head and hw_head: unmap the
 * DMA (single mapping for the SOP/head entry, page mapping otherwise),
 * free the skb stashed on each EOP entry, then restart the queue when
 * enough descriptors are available again.  Returns 0.
 */
int aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);

	for (; self->sw_head != self->hw_head;
		self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop))
				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);
	}

	if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX)
		aq_nic_ndev_queue_start(self->aq_nic, self->idx);

	return 0;
}
/* Return non-zero when descriptor index @i lies strictly between head @h
 * and tail @t, taking circular wrap-around into account.
 */
static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
					       unsigned int t)
{
	if (h < t)
		return (h < i) && (i < t);
	/* Wrapped range: valid indices are above h or below t. */
	return (h < i) || (i < t);
}
/* Room build_skb() needs after the data for struct skb_shared_info. */
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

/* Deliver completed Rx descriptors to the network stack.
 * Consumes at most @budget descriptors; *work_done is incremented once per
 * descriptor processed. Returns 0 on success or a negative errno.
 */
int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	int err = 0;
	bool is_rsc_completed = true;

	for (; (self->sw_head != self->hw_head) && budget;
		self->sw_head = aq_ring_next_dx(self, self->sw_head),
		--budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		struct aq_ring_buff_s *buff_ = NULL;

		/* Hardware flagged an error: drop the page and move on. */
		if (buff->is_error) {
			__free_pages(buff->page, 0);
			continue;
		}

		/* Already consumed as a fragment of an earlier packet. */
		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			/* Multi-descriptor packet: walk the 'next' chain and
			 * make sure every buffer up to EOP has already been
			 * written back by hardware before touching any.
			 */
			for (next_ = buff->next,
			     buff_ = &self->buff_ring[next_]; true;
			     next_ = buff_->next,
			     buff_ = &self->buff_ring[next_]) {
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed)) {
					is_rsc_completed = false;
					break;
				}

				if (buff_->is_eop)
					break;
			}

			/* Chain incomplete: retry on the next poll. */
			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
		}

		/* for single fragment packets use build_skb() */
		if (buff->is_eop) {
			skb = build_skb(page_address(buff->page),
					buff->len + AQ_SKB_ALIGN);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}

			skb->dev = ndev;
			skb_put(skb, buff->len);
		} else {
			/* Multi-fragment: pull the Ethernet header into the
			 * linear area, attach the pages as paged frags.
			 */
			skb = netdev_alloc_skb(ndev, ETH_HLEN);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, ETH_HLEN);
			memcpy(skb->data, page_address(buff->page), ETH_HLEN);

			skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
					buff->len - ETH_HLEN,
					SKB_TRUESIZE(buff->len - ETH_HLEN));

			for (i = 1U, next_ = buff->next,
			     buff_ = &self->buff_ring[next_]; true;
			     next_ = buff_->next,
			     buff_ = &self->buff_ring[next_], ++i) {
				/* NOTE(review): truesize is computed from the
				 * FIRST buffer's len, not buff_->len — looks
				 * suspicious; confirm intent.
				 */
				skb_add_rx_frag(skb, i, buff_->page, 0,
						buff_->len,
						SKB_TRUESIZE(buff->len -
						ETH_HLEN));
				buff_->is_cleaned = 1;

				if (buff_->is_eop)
					break;
			}
		}

		skb->protocol = eth_type_trans(skb, ndev);
		if (unlikely(buff->is_cso_err)) {
			++self->stats.rx.errors;
			__skb_mark_checksum_bad(skb);
		} else {
			/* One "unnecessary" level per validated header. */
			if (buff->is_ip_cso) {
				__skb_incr_checksum_unnecessary(skb);
				if (buff->is_udp_cso || buff->is_tcp_cso)
					__skb_incr_checksum_unnecessary(skb);
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
		}

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);

		netif_receive_skb(skb);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;
	}

err_exit:
	return err;
}
/* Refill the Rx ring: give every free descriptor a freshly allocated,
 * DMA-mapped page. sw_tail advances once per filled slot.
 *
 * Returns 0 on success or -ENOMEM. On failure the page of the slot that
 * failed mid-way is released here; fully initialized slots are later
 * released by aq_ring_rx_deinit().
 *
 * Fixes: dma_mapping_error() returns non-zero — not necessarily negative —
 * on failure, so the old "err < 0" test could miss mapping failures and
 * leave an invalid DMA address in the ring. Also drops a dead duplicate
 * "if (err < 0) goto err_exit;" that sat directly before the label.
 */
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	for (i = aq_ring_avail_dx(self); i--;
		self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
					 __GFP_COMP, 0);
		if (!buff->page) {
			err = -ENOMEM;
			goto err_exit;
		}

		buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
					buff->page, 0,
					AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
		if (dma_mapping_error(aq_nic_get_dev(self->aq_nic),
				      buff->pa)) {
			err = -ENOMEM;
			goto err_exit;
		}

		buff = NULL;
	}

err_exit:
	if (err < 0) {
		if (buff && buff->page)
			__free_pages(buff->page, 0);
	}

	return err;
}
/* Release every Rx page still owned by the ring: unmap its DMA mapping and
 * free the page. Safe to call with a NULL ring.
 */
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	while (self->sw_head != self->sw_tail) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
			       AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

		__free_pages(buff->page, 0);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
}
/* Release every in-flight Tx buffer: unmap DMA (single mapping for SOP
 * slots, page mapping otherwise) and free the skb at end-of-packet.
 * Safe to call with a NULL ring.
 */
void aq_ring_tx_deinit(struct aq_ring_s *self)
{
	struct device *dev;

	if (!self)
		return;

	dev = aq_nic_get_dev(self->aq_nic);

	while (self->sw_head != self->sw_tail) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop))
				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
}
/* Free the ring's bookkeeping array and its coherent descriptor memory.
 * Safe to call with a NULL ring or a partially constructed one.
 */
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	/* kfree(NULL) is a no-op, so buff_ring needs no separate check. */
	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
#ifndef AQ_RING_H
#define AQ_RING_H
#include "aq_common.h"
struct page;
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
* 8bytes|len l3,l4 | pa | pa | pa
* +----------+----------+----------+-----------
* 4/8bytes|len pkt |len pkt | | skb
* +----------+----------+----------+-----------
* 4/8bytes|is_txc |len,flags |len |len,is_eop
* +----------+----------+----------+-----------
*
* This aq_ring_buff_s doesn't have endianness dependency.
* It is __packed for cache line optimizations.
*/
/* Per-descriptor software bookkeeping. The leading union is interpreted
 * according to the descriptor's role (see the diagram above); the trailing
 * union overlays the state bits with a single 'flags' word so all state can
 * be cleared in one store.
 */
struct __packed aq_ring_buff_s {
	union {
		/* RX */
		struct {
			u32 rss_hash;		/* RSS hash from hardware */
			u16 next;		/* index of next buffer of this packet */
			u8 is_hash_l4;		/* rss_hash is an L4 hash */
			u8 rsvd1;
			struct page *page;	/* receive page */
		};
		/* EOP */
		struct {
			dma_addr_t pa_eop;
			struct sk_buff *skb;	/* freed on Tx completion */
		};
		/* DX */
		struct {
			dma_addr_t pa;		/* DMA address of data buffer */
		};
		/* SOP */
		struct {
			dma_addr_t pa_sop;
			u32 len_pkt_sop;	/* whole packet length */
		};
		/* TxC (Tx context descriptor, used for LSO) */
		struct {
			u32 mss;		/* TCP maximum segment size */
			u8 len_l2;		/* L2 header length */
			u8 len_l3;		/* L3 header length */
			u8 len_l4;		/* L4 header length */
			u8 rsvd2;
			u32 len_pkt;		/* whole packet length */
		};
	};
	union {
		struct {
			u32 len:16;		/* this buffer's data length */
			u32 is_ip_cso:1;	/* IP checksum offload requested/valid */
			u32 is_udp_cso:1;
			u32 is_tcp_cso:1;
			u32 is_cso_err:1;	/* hardware checksum error */
			u32 is_sop:1;		/* start of packet */
			u32 is_eop:1;		/* end of packet */
			u32 is_txc:1;		/* Tx context descriptor */
			u32 is_mapped:1;	/* pa is a live DMA mapping */
			u32 is_cleaned:1;	/* consumed by aq_ring_rx_clean() */
			u32 is_error:1;		/* hardware reported an error */
			u32 rsvd3:6;
		};
		u32 flags;	/* all state bits as one word */
	};
};
/* Software Rx counters, accumulated by aq_ring_rx_clean(). */
struct aq_ring_stats_rx_s {
	u64 errors;		/* packets with hardware checksum errors */
	u64 packets;
	u64 bytes;
	u64 lro_packets;
	u64 jumbo_packets;
};
/* Software Tx counters. */
struct aq_ring_stats_tx_s {
	u64 errors;
	u64 packets;
	u64 bytes;
};
/* A ring is either Rx or Tx, so it carries one set of counters, not both. */
union aq_ring_stats_s {
	struct aq_ring_stats_rx_s rx;
	struct aq_ring_stats_tx_s tx;
};
/* A single descriptor ring (Tx or Rx) and its software state. */
struct aq_ring_s {
	struct aq_obj_s header;		/* common lock/flags header */
	struct aq_ring_buff_s *buff_ring;	/* per-descriptor bookkeeping */
	u8 *dx_ring;		/* descriptors ring, dma shared mem */
	struct aq_nic_s *aq_nic;	/* owning NIC */
	unsigned int idx;	/* for HW layer registers operations */
	unsigned int hw_head;	/* last head position reported by hardware */
	unsigned int sw_head;	/* consumer index */
	unsigned int sw_tail;	/* producer index */
	unsigned int size;	/* descriptors number */
	unsigned int dx_size;	/* TX or RX descriptor size, */
				/* stored here for faster math */
	union aq_ring_stats_s stats;
	dma_addr_t dx_ring_pa;	/* DMA address of dx_ring */
};
/* Ring placement parameters: interrupt vector and CPU binding. */
struct aq_ring_param_s {
	unsigned int vec_idx;	/* interrupt vector index */
	unsigned int cpu;	/* CPU this vector is bound to */
	cpumask_t affinity_mask;
};
/* Advance descriptor index @dx by one, wrapping to zero at ring end. */
static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
					   unsigned int dx)
{
	dx += 1U;
	if (dx >= self->size)
		return 0U;
	return dx;
}
/* Number of free descriptors; one slot is always kept unused so a full
 * ring can be told apart from an empty one.
 */
static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
{
	if (self->sw_tail >= self->sw_head)
		return (self->size - 1) - self->sw_tail + self->sw_head;
	return self->sw_head - self->sw_tail - 1;
}
/* Ring construction / teardown */
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg);
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg);
int aq_ring_init(struct aq_ring_s *self);
void aq_ring_tx_deinit(struct aq_ring_s *self);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);

/* Datapath operations */
void aq_ring_tx_append_buffs(struct aq_ring_s *ring,
			     struct aq_ring_buff_s *buffer,
			     unsigned int buffers);
int aq_ring_tx_clean(struct aq_ring_s *self);
int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget);
int aq_ring_rx_fill(struct aq_ring_s *self);
#endif /* AQ_RING_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_rss.h: Receive Side Scaling definitions. */
#ifndef AQ_RSS_H
#define AQ_RSS_H
#include "aq_common.h"
#include "aq_cfg.h"
/* RSS configuration: secret hash key plus the queue indirection table. */
struct aq_rss_parameters {
	u16 base_cpu_number;
	u16 indirection_table_size;
	u16 hash_secret_key_size;
	u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)];
	u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX];
};
#endif /* AQ_RSS_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_utils.h: Useful macro and structures used in all layers of driver. */
#ifndef AQ_UTILS_H
#define AQ_UTILS_H
#include "aq_common.h"
#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
/* Common object header embedded in nic/vec/ring/hw structures. */
struct aq_obj_s {
	spinlock_t lock; /* spinlock for nic/rings processing */
	atomic_t flags;		/* state bits, see aq_utils_obj_set/clear/test */
	atomic_t busy_count;
};
/* Atomically OR @mask into @flags, retrying until no concurrent writer
 * interferes (lock-free read-modify-write via cmpxchg).
 */
static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
{
	unsigned long old_val, new_val;

	do {
		old_val = atomic_read(flags);
		new_val = old_val | mask;
	} while (atomic_cmpxchg(flags, old_val, new_val) != old_val);
}
/* Atomically clear the bits of @mask in @flags, retrying on contention. */
static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask)
{
	unsigned long old_val, new_val;

	do {
		old_val = atomic_read(flags);
		new_val = old_val & ~mask;
	} while (atomic_cmpxchg(flags, old_val, new_val) != old_val);
}
/* Return true when any bit of @mask is currently set in @flags. */
static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask)
{
	return atomic_read(flags) & mask;
}
#endif /* AQ_UTILS_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
* Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
*/
#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"
#include <linux/netdevice.h>
/* Interrupt vector: one NAPI context plus a Tx/Rx ring pair per traffic
 * class, bound to a single interrupt vector and CPU.
 */
struct aq_vec_s {
	struct aq_obj_s header;		/* common lock/flags header */
	struct aq_hw_ops *aq_hw_ops;	/* hardware-layer callbacks */
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;		/* number of initialized ring pairs */
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	/* [traffic class][AQ_VEC_TX_ID / AQ_VEC_RX_ID] */
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};
#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1
/* NAPI poll handler: for every ring pair of this vector, reap completed Tx
 * work, receive Rx packets, and refill the Rx ring. Re-enables the vector's
 * interrupt once less than @budget work was done.
 * NOTE(review): the __releases/__acquires annotations do not obviously match
 * the trylock/unlock pattern inside — confirm they are intended for sparse.
 */
static int aq_vec_poll(struct napi_struct *napi, int budget)
__releases(&self->lock)
__acquires(&self->lock)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	struct aq_ring_s *ring = NULL;
	int work_done = 0;
	int err = 0;
	unsigned int i = 0U;
	unsigned int sw_tail_old = 0U;
	bool was_tx_cleaned = false;

	if (!self) {
		err = -EINVAL;
	} else if (spin_trylock(&self->header.lock)) {
		for (i = 0U, ring = self->ring[0];
			self->tx_rings > i; ++i, ring = self->ring[i]) {
			/* Pick up the latest Tx head written back by HW
			 * (optional callback), then release finished buffers.
			 */
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
							self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
				ring[AQ_VEC_TX_ID].hw_head) {
				err = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
				was_tx_cleaned = true;
			}

			/* Let the HW layer parse new Rx write-backs. */
			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
				ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				/* Refill, then tell HW about the new tail. */
				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

		/* After Tx work, claim the full budget so NAPI polls again. */
		if (was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete(napi);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}

err_exit:
		spin_unlock(&self->header.lock);
	}

	return work_done;
}
/* Allocate a vector plus one Tx/Rx ring pair per configured traffic class.
 * Returns NULL on failure; partially built state is torn down through
 * aq_vec_free().
 */
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	/* Spread vectors over CPUs starting at the RSS base CPU. */
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		/* NOTE(review): 'self->nic' — struct aq_vec_s declares
		 * 'aq_nic', not 'nic'; presumably AQ_NIC_TCVEC2RING() never
		 * expands its first argument — confirm.
		 */
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}
	return self;
}
/* Bind the vector to the hardware layer and initialize each ring pair:
 * reset software cursors, program the rings into hardware, then pre-fill
 * the Rx rings with mapped pages and publish the tail to hardware.
 */
int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	spin_lock_init(&self->header.lock);

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		/* Fill the software ring, then hand the tail to hardware. */
		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}
/* Enable Tx and Rx DMA on every ring pair, then enable NAPI polling.
 * Returns 0 on success or the first hardware-layer error.
 */
int aq_vec_start(struct aq_vec_s *self)
{
	int err = 0;
	unsigned int i;

	for (i = 0U; i < self->tx_rings; ++i) {
		struct aq_ring_s *pair = self->ring[i];

		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&pair[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&pair[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}
/* Stop DMA on every ring pair, then silence the NAPI context. */
void aq_vec_stop(struct aq_vec_s *self)
{
	unsigned int i;

	for (i = 0U; i < self->tx_rings; ++i) {
		struct aq_ring_s *pair = self->ring[i];

		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &pair[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &pair[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}
/* Release the buffers of every ring pair owned by this vector.
 * Safe to call with a NULL vector.
 */
void aq_vec_deinit(struct aq_vec_s *self)
{
	unsigned int i;

	if (!self)
		return;

	for (i = 0U; i < self->tx_rings; ++i) {
		struct aq_ring_s *pair = self->ring[i];

		aq_ring_tx_deinit(&pair[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&pair[AQ_VEC_RX_ID]);
	}
}
/* Free ring memory of every pair, detach the NAPI context and release the
 * vector itself. Safe to call with a NULL or partially built vector.
 */
void aq_vec_free(struct aq_vec_s *self)
{
	unsigned int i;

	if (!self)
		return;

	for (i = 0U; i < self->tx_rings; ++i) {
		struct aq_ring_s *pair = self->ring[i];

		aq_ring_free(&pair[AQ_VEC_TX_ID]);
		aq_ring_free(&pair[AQ_VEC_RX_ID]);
	}

	netif_napi_del(&self->napi);

	kfree(self);
}
/* MSI/MSI-X interrupt handler: defer all work to NAPI context. */
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;

	if (!self)
		return IRQ_NONE;

	napi_schedule(&self->napi);

	return IRQ_HANDLED;
}
/* Legacy (shared line) interrupt handler: read the interrupt status, and
 * only claim the interrupt when one of our bits is set; otherwise report
 * IRQ_NONE so the kernel can poll the other devices sharing the line.
 *
 * Fixes: the previous version stored IRQ_NONE (== 0) into a variable that
 * was finally tested with "err >= 0 ? IRQ_HANDLED : IRQ_NONE", so foreign
 * interrupts were wrongly reported as IRQ_HANDLED.
 */
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;

	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (!irq_mask) {
		/* Not our interrupt: re-arm and decline it. */
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	/* Mask our vector until the NAPI poll has drained the rings. */
	self->aq_hw_ops->hw_irq_disable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
	napi_schedule(&self->napi);

	return IRQ_HANDLED;
}
/* Expose the CPU affinity mask populated in aq_vec_alloc(). */
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}
/* Accumulate the counters of every ring pair of this vector into the
 * caller-supplied totals (the output structs are added to, not reset).
 */
void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	unsigned int r;

	for (r = 0U; r < self->tx_rings; ++r) {
		struct aq_ring_s *pair = self->ring[r];
		struct aq_ring_stats_tx_s *tx = &pair[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &pair[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
	}
}
/* Add this vector's software counters into the ethtool statistics array
 * @data; *p_count (when given) receives the number of entries written.
 */
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;
	unsigned int count = 0U;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));

	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	/* Order must match the ethtool statistics string table. */
	data[count++] += stats_rx.packets;
	data[count++] += stats_tx.packets;
	data[count++] += stats_rx.jumbo_packets;
	data[count++] += stats_rx.lro_packets;
	data[count++] += stats_rx.errors;

	if (p_count)
		*p_count = count;

	return 0;
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings.
* Declaration of functions for Rx and Tx rings.
*/
#ifndef AQ_VEC_H
#define AQ_VEC_H
#include "aq_common.h"
#include <linux/irqreturn.h>
/* Types from other layers; only pointers are used, so forward-declare. */
struct aq_hw_s;
struct aq_hw_ops;
struct aq_ring_stats_rx_s;
struct aq_ring_stats_tx_s;

/* Interrupt entry points */
irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);

/* Vector lifetime */
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg);
int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
int aq_vec_start(struct aq_vec_s *self);
void aq_vec_stop(struct aq_vec_s *self);

/* Helpers */
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data,
			unsigned int *p_count);
void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx);
#endif /* AQ_VEC_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"
/* Copy the static A0 capability table into the caller's buffer. */
static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
				 struct aq_hw_caps_s *aq_hw_caps)
{
	memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
	return 0;	/* cannot fail */
}
/* Allocate and minimally initialize an A0 hardware instance.
 * Returns the base aq_hw_s pointer, or NULL on allocation failure.
 * @port and @ops are accepted for interface symmetry but not used here.
 */
static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
					unsigned int port,
					struct aq_hw_ops *ops)
{
	struct hw_atl_s *self;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		return NULL;

	self->base.aq_pci_func = aq_pci_func;

	/* Register probed for all-ones reads — presumably used by the
	 * unplug-detection helpers; confirm against aq_hw_utils.
	 */
	self->base.not_ff_addr = 0x10U;

	return (struct aq_hw_s *)self;
}
/* Counterpart of hw_atl_a0_create(): release the hardware instance. */
static void hw_atl_a0_destroy(struct aq_hw_s *self)
{
	kfree(self);
}
/* Soft-reset the chip: global reset, then interrupt block reset, then kick
 * the MPI firmware into its reset state.
 * NOTE(review): AQ_HW_WAIT_FOR appears to assign 'err' behind the scenes
 * (it is tested right below with no visible assignment); the macro is
 * defined elsewhere — confirm.
 */
static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
	int err = 0;

	/* Adjust the register-reset disables before the global soft reset. */
	glb_glb_reg_res_dis_set(self, 1U);
	pci_pci_reg_res_dis_set(self, 0U);
	rx_rx_reg_res_dis_set(self, 0U);
	tx_tx_reg_res_dis_set(self, 0U);

	HW_ATL_FLUSH();
	glb_soft_res_set(self, 1);

	/* check 10 times by 1ms */
	AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
	if (err < 0)
		goto err_exit;

	itr_irq_reg_res_dis_set(self, 0U);
	itr_res_irq_set(self, 1U);

	/* check 10 times by 1ms */
	AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
	if (err < 0)
		goto err_exit;

	hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
/* Program packet-buffer sizes, scheduler credits/weights and the 802.1p
 * priority-to-TC mapping. Only traffic class 0 is configured.
 */
static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
	u32 tc = 0U;
	u32 buff_size = 0U;
	unsigned int i_priority = 0U;
	bool is_rx_flow_control = false;

	/* TPS Descriptor rate init */
	tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
	tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

	/* TPS VM init */
	tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

	/* TPS TC credits init */
	tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
	tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

	tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
	tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
	tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
	tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

	/* Tx buf size: watermarks at 66% / 50% of the buffer */
	buff_size = HW_ATL_A0_TXBUF_MAX;

	tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	tpb_tx_buff_hi_threshold_per_tc_set(self,
					    (buff_size * (1024 / 32U) * 66U) /
					    100U, tc);
	tpb_tx_buff_lo_threshold_per_tc_set(self,
					    (buff_size * (1024 / 32U) * 50U) /
					    100U, tc);

	/* QoS Rx buf size per TC */
	tc = 0;
	is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
	buff_size = HW_ATL_A0_RXBUF_MAX;

	rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
	rpb_rx_buff_hi_threshold_per_tc_set(self,
					    (buff_size *
					    (1024U / 32U) * 66U) /
					    100U, tc);
	rpb_rx_buff_lo_threshold_per_tc_set(self,
					    (buff_size *
					    (1024U / 32U) * 50U) /
					    100U, tc);
	/* Generate XOFF (pause) only when Rx flow control is configured. */
	rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

	/* QoS 802.1p priority -> TC mapping: everything to TC 0 */
	for (i_priority = 8U; i_priority--;)
		rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

	return aq_hw_err_from_flags(self);
}
/* Load the RSS secret key into hardware one 32-bit word at a time, written
 * byte-swapped and highest key index first. A zero key is written when RSS
 * is disabled.
 */
static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
				     struct aq_rss_parameters *rss_params)
{
	struct aq_nic_cfg_s *cfg = NULL;
	int err = 0;
	unsigned int i = 0U;
	unsigned int addr = 0U;

	cfg = self->aq_nic_cfg;

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = cfg->is_rss ?
			__swab32(rss_params->hash_secret_key[i]) : 0U;
		rpf_rss_key_wr_data_set(self, key_data);
		rpf_rss_key_addr_set(self, addr);
		rpf_rss_key_wr_en_set(self, 1U);
		/* NOTE(review): AQ_HW_WAIT_FOR appears to assign 'err';
		 * macro defined elsewhere — confirm.
		 */
		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
/* Pack the RSS indirection table into 3-bit entries inside 'bitary' and
 * write it to hardware 16 bits at a time, highest address first.
 */
static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
				struct aq_rss_parameters *rss_params)
{
	u8 *indirection_table =	rss_params->indirection_table;
	u32 i = 0U;
	u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
	int err = 0;
	u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
					HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];

	memset(bitary, 0, sizeof(bitary));

	for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) {
		/* Place each 3-bit queue number at bit offset i*3.
		 * NOTE(review): a u32 store into a u16 array — an aliasing /
		 * endianness hazard; confirm this is intended for the
		 * little-endian targets this hardware supports.
		 */
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((indirection_table[i] % num_rss_queues) <<
			((i * 3U) & 0xFU));
	}

	for (i = AQ_DIMOF(bitary); i--;) {
		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		rpf_rss_redir_tbl_addr_set(self, i);
		rpf_rss_redir_wr_en_set(self, 1U);
		/* NOTE(review): AQ_HW_WAIT_FOR appears to assign 'err';
		 * macro defined elsewhere — confirm.
		 */
		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
/* Enable checksum and LSO offload engines.
 * @aq_nic_cfg is currently unused but kept for interface symmetry with the
 * other hw_ops callbacks.
 *
 * Fixes: the previous version tested a local 'err' three times although it
 * was never assigned before those tests — all three checks were dead code.
 */
static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
{
	/* TX checksums offloads*/
	tpo_ipv4header_crc_offload_en_set(self, 1);
	tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksums offloads*/
	rpo_ipv4header_crc_offload_en_set(self, 1);
	rpo_tcp_udp_crc_offload_en_set(self, 1);

	/* LSO offloads*/
	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	return aq_hw_err_from_flags(self);
}
/* One-time Tx datapath setup: LSO TCP flag masks, descriptor write-back
 * interrupt, DCA off.
 */
static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
	/* LSO TCP flag masks for first / middle / last segments */
	thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
	thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
	thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

	/* Tx interrupts */
	tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc: magic register depends on the TPO2 chip feature */
	aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
			0x00010000U : 0x00000000U);
	tdm_tx_dca_en_set(self, 0U);
	tdm_tx_dca_mode_set(self, 0U);

	tpb_tx_path_scp_ins_en_set(self, 1U);

	return aq_hw_err_from_flags(self);
}
/* One-time Rx datapath setup: traffic-class mode, RSS ring selection,
 * MAC/multicast/VLAN filters and Rx write-back interrupts.
 */
static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
{
	struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
	int i;

	/* Rx TC/RSS number config */
	rpb_rpf_rx_traf_class_mode_set(self, 1U);

	/* Rx flow control */
	rpb_rx_flow_ctl_mode_set(self, 1U);

	/* RSS Ring selection */
	reg_rx_flr_rss_control1set(self, cfg->is_rss ?
					0xB3333333U : 0x00000000U);

	/* Multicast filters: only unicast filter 0 is enabled */
	for (i = HW_ATL_A0_MAC_MAX; i--;) {
		rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
		rpfl2unicast_flr_act_set(self, 1U, i);
	}

	reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
	reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

	/* Vlan filters: S-tag and C-tag ethertypes, promiscuous VLAN mode */
	rpf_vlan_outer_etht_set(self, 0x88A8U);
	rpf_vlan_inner_etht_set(self, 0x8100U);
	rpf_vlan_prom_mode_en_set(self, 1);

	/* Rx Interrupts */
	rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

	/* misc */
	rpfl2broadcast_flr_act_set(self, 1U);
	rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

	rdm_rx_dca_en_set(self, 0U);
	rdm_rx_dca_mode_set(self, 0U);

	return aq_hw_err_from_flags(self);
}
/* Program @mac_addr into unicast filter HW_ATL_A0_MAC.
 * Returns -EINVAL when @mac_addr is NULL, otherwise the chip error state.
 */
static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
	unsigned int msw;
	unsigned int lsw;

	if (!mac_addr)
		return -EINVAL;

	/* Split the MAC into the 16-bit high and 32-bit low register words. */
	msw = (mac_addr[0] << 8) | (mac_addr[1]);
	lsw = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	/* Keep the filter disabled while its address is being rewritten. */
	rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
	rpfl2unicast_dest_addresslsw_set(self, lsw, HW_ATL_A0_MAC);
	rpfl2unicast_dest_addressmsw_set(self, msw, HW_ATL_A0_MAC);
	rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);

	return aq_hw_err_from_flags(self);
}
/* Top-level A0 bring-up: datapaths, MAC address, link, QoS, RSS,
 * interrupts and offloads, in that order.
 */
static int hw_atl_a0_hw_init(struct aq_hw_s *self,
			     struct aq_nic_cfg_s *aq_nic_cfg,
			     u8 *mac_addr)
{
	/* Global interrupt control values, indexed by
	 * [irq type][single (0) or multi (1) vector]
	 */
	static u32 aq_hw_atl_igcr_table_[4][2] = {
		{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
		{ 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
		{ 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
		{ 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
	};

	int err = 0;

	self->aq_nic_cfg = aq_nic_cfg;

	hw_atl_utils_hw_chip_features_init(self,
					   &PHAL_ATLANTIC_A0->chip_features);

	hw_atl_a0_hw_init_tx_path(self);
	hw_atl_a0_hw_init_rx_path(self);

	hw_atl_a0_hw_mac_addr_set(self, mac_addr);

	hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);

	/* Write-then-clear sequence on the Tx DMA debug control register. */
	reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
	reg_tx_dma_debug_ctl_set(self, 0x000000b8U);

	hw_atl_a0_hw_qos_set(self);
	hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
	hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

	err = aq_hw_err_from_flags(self);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	reg_irq_glb_ctl_set(self,
			    aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
						 [(aq_nic_cfg->vecs > 1U) ?
						 1 : 0]);

	itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

	/* Interrupts */
	reg_gen_irq_map_set(self,
			    ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
			    ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
			    ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
			    ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);

	hw_atl_a0_hw_offload_set(self, aq_nic_cfg);

err_exit:
	return err;
}
/* Enable descriptor DMA for one Tx ring. */
static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	tdm_tx_desc_en_set(self, 1, ring->idx);
	return aq_hw_err_from_flags(self);
}
/* Enable descriptor DMA for one Rx ring. */
static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
				      struct aq_ring_s *ring)
{
	rdm_rx_desc_en_set(self, 1, ring->idx);
	return aq_hw_err_from_flags(self);
}
/* Enable the global Tx and Rx packet buffers. */
static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
	tpb_tx_buff_en_set(self, 1);
	rpb_rx_buff_en_set(self, 1);
	return aq_hw_err_from_flags(self);
}
/* Publish the software tail pointer to hardware, kicking Tx DMA. */
static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
	return 0;
}
/* Translate @frags ring buffers starting at sw_tail into hardware Tx
 * descriptors (a context descriptor first when the packet uses LSO), then
 * bump the hardware tail pointer.
 */
static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int frags)
{
	struct aq_ring_buff_s *buff = NULL;
	struct hw_atl_txd_s *txd = NULL;
	unsigned int buff_pa_len = 0U;
	unsigned int pkt_len = 0U;
	unsigned int frag_count = 0U;
	bool is_gso = false;

	buff = &ring->buff_ring[ring->sw_tail];
	/* Single-fragment packets carry the length in 'len', multi-fragment
	 * ones in 'len_pkt'.
	 */
	pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

	for (frag_count = 0; frag_count < frags; frag_count++) {
		txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
						HW_ATL_A0_TXD_SIZE];
		txd->ctl = 0;
		txd->ctl2 = 0;
		txd->buf_addr = 0;

		buff = &ring->buff_ring[ring->sw_tail];

		if (buff->is_txc) {
			/* Context descriptor: header lengths and MSS for LSO */
			txd->ctl |= (buff->len_l3 << 31) |
				(buff->len_l2 << 24) |
				HW_ATL_A0_TXD_CTL_CMD_TCP |
				HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
			txd->ctl2 |= (buff->mss << 16) |
				(buff->len_l4 << 8) |
				(buff->len_l3 >> 1);

			/* Payload length excludes the protocol headers. */
			pkt_len -= (buff->len_l4 +
				    buff->len_l3 +
				    buff->len_l2);
			is_gso = true;
		} else {
			buff_pa_len = buff->len;

			txd->buf_addr = buff->pa;
			txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
						((u32)buff_pa_len << 4));
			txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
			/* PAY_LEN */
			txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);
			if (is_gso) {
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
				txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
			}

			/* Tx checksum offloads */
			if (buff->is_ip_cso)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;

			if (buff->is_udp_cso || buff->is_tcp_cso)
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;

			if (unlikely(buff->is_eop)) {
				/* Last fragment: request write-back so the
				 * completion is visible to tx_clean.
				 */
				txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
				txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
			}
		}

		ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
	}

	hw_atl_a0_hw_tx_ring_tail_update(self, ring);
	return aq_hw_err_from_flags(self);
}
/* Program one Rx ring into hardware: descriptor base and length, buffer
 * sizes, interrupt mapping, DCA off. The ring is left disabled.
 */
static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	rdm_rx_desc_en_set(self, false, aq_ring->idx);

	/* Header splitting disabled */
	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

	reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
					   aq_ring->idx);

	reg_rx_dma_desc_base_addressmswset(self,
					   dma_desc_addr_msw, aq_ring->idx);

	/* Length register is in units of 8 descriptors. */
	rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	/* Data buffer size register is in 1 KiB units. */
	rdm_rx_desc_data_buff_size_set(self,
				       AQ_CFG_RX_FRAME_MAX / 1024U,
				       aq_ring->idx);

	rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
	rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
	rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

	/* Rx ring set mode */

	/* Mapping interrupt vector */
	itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	itr_irq_map_en_rx_set(self, true, aq_ring->idx);

	rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
	rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
	rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}
/* Program one Tx ring into hardware: descriptor base and length, tail
 * pointer, write-back threshold and interrupt mapping.
 */
static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
				     struct aq_ring_s *aq_ring,
				     struct aq_ring_param_s *aq_ring_param)
{
	u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
	u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

	reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
					   aq_ring->idx);

	reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
					   aq_ring->idx);

	/* Length register is in units of 8 descriptors. */
	tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

	hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);

	/* Set Tx threshold */
	tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

	/* Mapping interrupt vector */
	itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
	itr_irq_map_en_tx_set(self, true, aq_ring->idx);

	tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
	tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

	return aq_hw_err_from_flags(self);
}
/* Publish refilled Rx buffers to hardware: write each buffer's DMA address
 * into its descriptor, then advance the device tail pointer to sw_tail.
 */
static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int sw_tail_old)
{
	unsigned int dx;

	for (dx = sw_tail_old; dx != ring->sw_tail;
	     dx = aq_ring_next_dx(ring, dx)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)
			&ring->dx_ring[dx * HW_ATL_A0_RXD_SIZE];
		struct aq_ring_buff_s *buff = &ring->buff_ring[dx];

		rxd->buf_addr = buff->pa;
		rxd->hdr_addr = 0U;	/* header split is not used */
	}

	/* dx == ring->sw_tail here, same value the original wrote. */
	reg_rx_dma_desc_tail_ptr_set(self, dx, ring->idx);

	return aq_hw_err_from_flags(self);
}
/* Refresh ring->hw_head from the device's Tx descriptor head register.
 * Returns -ENXIO when the device is flagged as unplugged (the read-back
 * value cannot be trusted then), otherwise the accumulated HW error state.
 */
static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	unsigned int head = tdm_tx_desc_head_ptr_get(self, ring->idx);

	if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG))
		return -ENXIO;

	ring->hw_head = head;

	return aq_hw_err_from_flags(self);
}
/* Harvest completed Rx descriptors on A0 silicon: for each descriptor the
 * device has written back, unmap the buffer and translate the writeback
 * fields (checksum results, RSS hash, EOP/jumbo chaining) into the ring
 * buffer metadata.  Includes an A0-specific recovery path for a stalled
 * descriptor engine and a skipped-descriptor-0 quirk.
 */
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
struct device *ndev = aq_nic_get_dev(ring->aq_nic);
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
&ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];
unsigned int is_err = 1U;
unsigned int is_rx_check_sum_enabled = 0U;
unsigned int pkt_type = 0U;
if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
/* Bit 4 of the DMA status indicates a stuck engine: reset
 * the descriptor machinery for this ring to recover.
 */
if ((1U << 4) &
reg_rx_dma_desc_status_get(self, ring->idx)) {
rdm_rx_desc_en_set(self, false, ring->idx);
rdm_rx_desc_res_set(self, true, ring->idx);
rdm_rx_desc_res_set(self, false, ring->idx);
rdm_rx_desc_en_set(self, true, ring->idx);
}
if (ring->hw_head ||
(rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
break;
} else if (!(rxd_wb->status & 0x1U)) {
/* Descriptor 0 not done but head advanced past 1: if
 * descriptor 1 completed, fake completion of desc 0.
 * NOTE(review): 1514 looks like a max standard
 * Ethernet frame length -- confirm against A0 errata.
 */
struct hw_atl_rxd_wb_s *rxd_wb1 =
(struct hw_atl_rxd_wb_s *)
(&ring->dx_ring[(1U) *
HW_ATL_A0_RXD_SIZE]);
if ((rxd_wb1->status & 0x1U)) {
rxd_wb->pkt_len = 1514U;
rxd_wb->status = 3U;
} else {
break;
}
}
}
buff = &ring->buff_ring[ring->hw_head];
if (0x3U != (rxd_wb->status & 0x3U))
rxd_wb->status |= 4;
/* Bits 2..4 of status are treated as error flags below. */
is_err = (0x0000001CU & rxd_wb->status);
is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
pkt_type = 0xFFU & (rxd_wb->type >> 4);
if (is_rx_check_sum_enabled) {
/* pkt_type bits 0..1: L3 protocol; bits 2..4: L4 protocol */
if (0x0U == (pkt_type & 0x3U))
buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;
if (0x4U == (pkt_type & 0x1CU))
buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
}
/* Checksum bits were consumed above; only the remaining error
 * bit marks the buffer as bad.
 */
is_err &= ~0x18U;
is_err &= ~0x04U;
dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
if (is_err || rxd_wb->type & 0x1000U) {
/* status error or DMA error */
buff->is_error = 1U;
} else {
if (self->aq_nic_cfg->is_rss) {
/* last 4 byte */
u16 rss_type = rxd_wb->type & 0xFU;
if (rss_type && rss_type < 0x8U) {
buff->is_hash_l4 = (rss_type == 0x4 ||
rss_type == 0x5);
buff->rss_hash = rxd_wb->rss_hash;
}
}
if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
/* End of packet: pkt_len is masked to the frame
 * size; 0 encodes exactly AQ_CFG_RX_FRAME_MAX.
 */
buff->len = (rxd_wb->pkt_len &
(AQ_CFG_RX_FRAME_MAX - 1U));
buff->len = buff->len ?
buff->len : AQ_CFG_RX_FRAME_MAX;
buff->next = 0U;
buff->is_eop = 1U;
} else {
/* jumbo */
buff->next = aq_ring_next_dx(ring,
ring->hw_head);
++ring->stats.rx.jumbo_packets;
}
}
}
return aq_hw_err_from_flags(self);
}
/* Unmask the requested interrupt lines (low 32 bits of @mask); the fatal
 * error line is always enabled alongside them.
 */
static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	u32 lsw = LODWORD(mask) | (1U << HW_ATL_A0_ERR_INT);

	itr_irq_msk_setlsw_set(self, lsw);

	return aq_hw_err_from_flags(self);
}
/* Mask and acknowledge the requested interrupt lines (low 32 bits). */
static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	u32 lsw = LODWORD(mask);

	itr_irq_msk_clearlsw_set(self, lsw);
	itr_irq_status_clearlsw_set(self, lsw);

	/* Bit 16 of the global IRQ status: bump the HAL's dpc counter. */
	if ((1U << 16) & reg_gen_irq_status_get(self))
		atomic_inc(&PHAL_ATLANTIC_A0->dpc);

	return aq_hw_err_from_flags(self);
}
/* Report the currently asserted interrupt lines (low 32 bits only). */
static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	u64 status = itr_irq_statuslsw_get(self);

	*mask = status;

	return aq_hw_err_from_flags(self);
}
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
unsigned int i = 0U;
rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled =
IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
return aq_hw_err_from_flags(self);
}
#undef IS_FILTER_ENABLED
/* Load @count MAC addresses from @ar_mac into the unicast filter slots
 * reserved for the multicast list (HW_ATL_A0_MAC_MIN onward).
 *
 * Returns 0 on success, -EBADRQC when @count exceeds the available slots,
 * otherwise an error translated from the accumulated hardware flags.
 */
static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_CFG_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	int err = 0;

	if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
		/* Fix: return a negative errno.  The original returned the
		 * positive EBADRQC, which callers do not treat as failure
		 * (the B0 counterpart already returns -EBADRQC).
		 */
		err = -EBADRQC;
		goto err_exit;
	}
	for (self->aq_nic_cfg->mc_list_count = 0U;
	     self->aq_nic_cfg->mc_list_count < count;
	     ++self->aq_nic_cfg->mc_list_count) {
		u32 i = self->aq_nic_cfg->mc_list_count;
		/* Split the 6-byte address: 2 high bytes / 4 low bytes. */
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		/* Disable the slot while its address is rewritten. */
		rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
		rpfl2unicast_dest_addresslsw_set(self,
						 l, HW_ATL_A0_MAC_MIN + i);
		rpfl2unicast_dest_addressmsw_set(self,
						 h, HW_ATL_A0_MAC_MIN + i);
		rpfl2_uc_flr_en_set(self,
				    (self->aq_nic_cfg->is_mc_list_enabled),
				    HW_ATL_A0_MAC_MIN + i);
	}
	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
/* Configure Rx interrupt throttling.  Disabled (or itr == 0) clears the
 * per-ring throttle registers; a fixed itr value is programmed directly;
 * itr == 0xFFFF selects a link-speed-dependent interval from a table.
 */
static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
bool itr_enabled)
{
unsigned int i = 0U;
if (itr_enabled && self->aq_nic_cfg->itr) {
if (self->aq_nic_cfg->itr != 0xFFFFU) {
/* User-requested interval, clamped; bit 31 enables
 * throttling in the register.
 */
u32 itr_ = (self->aq_nic_cfg->itr >> 1);
itr_ = min(AQ_CFG_IRQ_MASK, itr_);
PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
(itr_ << 0x10);
} else {
/* NOTE(review): semantics of register 0x2A00 and the two
 * magic writes below are undocumented here -- confirm
 * against the hardware reference before changing.
 */
u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
if (n < self->aq_link_status.mbps) {
PHAL_ATLANTIC_A0->itr_rx = 0U;
} else {
static unsigned int hw_timers_tbl_[] = {
0x01CU, /* 10Gbit */
0x039U, /* 5Gbit */
0x039U, /* 5Gbit 5GS */
0x073U, /* 2.5Gbit */
0x120U, /* 1Gbit */
0x1FFU, /* 100Mbit */
};
unsigned int speed_index =
hw_atl_utils_mbps_2_speed_index(
self->aq_link_status.mbps);
PHAL_ATLANTIC_A0->itr_rx =
0x80000000U |
(hw_timers_tbl_[speed_index] << 0x10U);
}
aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
}
} else {
PHAL_ATLANTIC_A0->itr_rx = 0U;
}
/* Apply the computed throttle value to every ring. */
for (i = HW_ATL_A0_RINGS_MAX; i--;)
reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
return aq_hw_err_from_flags(self);
}
/* Quiesce the A0 NIC by masking and acknowledging every interrupt source. */
static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
	hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
tdm_tx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
rdm_rx_desc_en_set(self, 0U, ring->idx);
return aq_hw_err_from_flags(self);
}
/* Request the given link speed from the firmware via the MPI interface.
 *
 * Returns 0 on success or the negative error from the MPI call.  The
 * original `if (err < 0) goto err_exit;` jumped to a label on the very
 * next line -- dead control flow, replaced with a direct return.
 */
static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
{
	return hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
}
/* Dispatch table binding the generic aq_hw_ops interface to the A0-specific
 * implementations in this file; slots that are not silicon-revision
 * specific are filled with shared hw_atl_utils_* helpers.
 */
static struct aq_hw_ops hw_atl_ops_ = {
.create = hw_atl_a0_create,
.destroy = hw_atl_a0_destroy,
.get_hw_caps = hw_atl_a0_get_hw_caps,
.hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
.hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
.hw_get_link_status = hw_atl_utils_mpi_get_link_status,
.hw_set_link_speed = hw_atl_a0_hw_set_speed,
.hw_init = hw_atl_a0_hw_init,
.hw_deinit = hw_atl_utils_hw_deinit,
.hw_set_power = hw_atl_utils_hw_set_power,
.hw_reset = hw_atl_a0_hw_reset,
.hw_start = hw_atl_a0_hw_start,
.hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
.hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop,
.hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start,
.hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop,
.hw_stop = hw_atl_a0_hw_stop,
.hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit,
.hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update,
.hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive,
.hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill,
.hw_irq_enable = hw_atl_a0_hw_irq_enable,
.hw_irq_disable = hw_atl_a0_hw_irq_disable,
.hw_irq_read = hw_atl_a0_hw_irq_read,
.hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init,
.hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init,
.hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set,
.hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_a0_hw_rss_set,
.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
/* Return the A0 hardware-ops table when @pdev is a supported aQuantia
 * device at the expected silicon revision, otherwise NULL.
 */
struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
{
	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
		return NULL;
	if (pdev->revision != 1U)
		return NULL;

	switch (pdev->device) {
	case HW_ATL_DEVICE_ID_0001:
	case HW_ATL_DEVICE_ID_D100:
	case HW_ATL_DEVICE_ID_D107:
	case HW_ATL_DEVICE_ID_D108:
	case HW_ATL_DEVICE_ID_D109:
		return &hw_atl_ops_;
	default:
		return NULL;
	}
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_a0.h: Declaration of abstract interface for Atlantic hardware
* specific functions.
*/
#ifndef HW_ATL_A0_H
#define HW_ATL_A0_H
#include "../aq_common.h"
#ifndef PCI_VENDOR_ID_AQUANTIA
#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
#define HW_ATL_DEVICE_ID_0001 0x0001
#define HW_ATL_DEVICE_ID_D100 0xD100
#define HW_ATL_DEVICE_ID_D107 0xD107
#define HW_ATL_DEVICE_ID_D108 0xD108
#define HW_ATL_DEVICE_ID_D109 0xD109
#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
#endif
struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
#endif /* HW_ATL_A0_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip specific
* constants.
*/
#ifndef HW_ATL_A0_INTERNAL_H
#define HW_ATL_A0_INTERNAL_H
#include "../aq_common.h"
#define HW_ATL_A0_MTU_JUMBO 9014U
#define HW_ATL_A0_TX_RINGS 4U
#define HW_ATL_A0_RX_RINGS 4U
#define HW_ATL_A0_RINGS_MAX 32U
#define HW_ATL_A0_TXD_SIZE 16U
#define HW_ATL_A0_RXD_SIZE 16U
#define HW_ATL_A0_MAC 0U
#define HW_ATL_A0_MAC_MIN 1U
#define HW_ATL_A0_MAC_MAX 33U
/* interrupts */
#define HW_ATL_A0_ERR_INT 8U
#define HW_ATL_A0_INT_MASK 0xFFFFFFFFU
#define HW_ATL_A0_TXD_CTL2_LEN 0xFFFFC000U
#define HW_ATL_A0_TXD_CTL2_CTX_EN 0x00002000U
#define HW_ATL_A0_TXD_CTL2_CTX_IDX 0x00001000U
#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD 0x00000001U
#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC 0x00000002U
#define HW_ATL_A0_TXD_CTL_BLEN 0x000FFFF0U
#define HW_ATL_A0_TXD_CTL_DD 0x00100000U
#define HW_ATL_A0_TXD_CTL_EOP 0x00200000U
#define HW_ATL_A0_TXD_CTL_CMD_X 0x3FC00000U
#define HW_ATL_A0_TXD_CTL_CMD_VLAN BIT(22)
#define HW_ATL_A0_TXD_CTL_CMD_FCS BIT(23)
#define HW_ATL_A0_TXD_CTL_CMD_IPCSO BIT(24)
#define HW_ATL_A0_TXD_CTL_CMD_TUCSO BIT(25)
#define HW_ATL_A0_TXD_CTL_CMD_LSO BIT(26)
#define HW_ATL_A0_TXD_CTL_CMD_WB BIT(27)
#define HW_ATL_A0_TXD_CTL_CMD_VXLAN BIT(28)
#define HW_ATL_A0_TXD_CTL_CMD_IPV6 BIT(21)
#define HW_ATL_A0_TXD_CTL_CMD_TCP BIT(22)
#define HW_ATL_A0_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_A0_MPI_STATE_ADR 0x036CU
#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU
#define HW_ATL_A0_MPI_SPEED_SHIFT 16U
#define HW_ATL_A0_RATE_10G BIT(0)
#define HW_ATL_A0_RATE_5G BIT(1)
#define HW_ATL_A0_RATE_2G5 BIT(3)
#define HW_ATL_A0_RATE_1G BIT(4)
#define HW_ATL_A0_RATE_100M BIT(5)
#define HW_ATL_A0_TXBUF_MAX 160U
#define HW_ATL_A0_RXBUF_MAX 320U
#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U
#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U
#define HW_ATL_A0_TC_MAX 1U
#define HW_ATL_A0_RSS_MAX 8U
#define HW_ATL_A0_FW_SEMA_RAM 0x2U
#define HW_ATL_A0_RXD_DD 0x1U
#define HW_ATL_A0_RXD_NCEA0 0x1U
#define HW_ATL_A0_RXD_WB_STAT2_EOP 0x0002U
#define HW_ATL_A0_UCP_0X370_REG 0x370U
#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
/* Hardware tx descriptor -- 16 bytes (HW_ATL_A0_TXD_SIZE), layout must
 * match the device exactly, hence __packed.
 */
struct __packed hw_atl_txd_s {
u64 buf_addr;
u32 ctl;
u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
};
/* Hardware tx context descriptor -- carries offload context (e.g. LSO
 * parameters) instead of a data buffer; same 16-byte slot as hw_atl_txd_s.
 */
struct __packed hw_atl_txc_s {
u32 rsvd;
u32 len;
u32 ctl;
u32 len2;
};
/* Hardware rx descriptor as written by the host: data buffer DMA address
 * plus a header-split address (always programmed to 0 by this driver).
 */
struct __packed hw_atl_rxd_s {
u64 buf_addr;
u64 hdr_addr;
};
/* Hardware rx descriptor writeback -- on completion the device overwrites
 * the rx descriptor with this layout.
 */
struct __packed hw_atl_rxd_wb_s {
u32 type; /* pkt/RSS type, checksum-enable and DMA-error bits */
u32 rss_hash;
u16 status; /* done / EOP / error flags */
u16 pkt_len;
u16 next_desc_ptr; /* next descriptor of a multi-descriptor chain */
u16 vlan;
};
/* HW layer capabilities advertised for the A0 silicon revision; consumed
 * through hw_atl_a0_get_hw_caps().
 */
static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
.ports = 1U,
.is_64_dma = true,
.msix_irqs = 4U,
.irq_mask = ~0U,
.vecs = HW_ATL_A0_RSS_MAX,
.tcs = HW_ATL_A0_TC_MAX,
.rxd_alignment = 1U,
.rxd_size = HW_ATL_A0_RXD_SIZE,
.rxds = 248U,
.txd_alignment = 1U,
.txd_size = HW_ATL_A0_TXD_SIZE,
.txds = 8U * 1024U,
.txhwb_alignment = 4096U,
.tx_rings = HW_ATL_A0_TX_RINGS,
.rx_rings = HW_ATL_A0_RX_RINGS,
.hw_features = NETIF_F_HW_CSUM |
NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_TSO,
.hw_priv_flags = IFF_UNICAST_FLT,
.link_speed_msk = (HW_ATL_A0_RATE_10G |
HW_ATL_A0_RATE_5G |
HW_ATL_A0_RATE_2G5 |
HW_ATL_A0_RATE_1G |
HW_ATL_A0_RATE_100M),
.flow_control = true,
.mtu = HW_ATL_A0_MTU_JUMBO,
.mac_regs_count = 88,
.fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED,
};
#endif /* HW_ATL_A0_INTERNAL_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "hw_atl_b0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_b0_internal.h"
static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps)
{
memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
return 0;
}
/* Allocate and minimally initialize the per-device B0 HAL state.
 * Returns NULL on allocation failure (same contract as the original).
 */
static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
					unsigned int port,
					struct aq_hw_ops *ops)
{
	struct hw_atl_s *self = kzalloc(sizeof(*self), GFP_KERNEL);

	if (self) {
		self->base.aq_pci_func = aq_pci_func;
		self->base.not_ff_addr = 0x10U;
	}

	return (struct aq_hw_s *)self;
}
/* Free the HAL state allocated by hw_atl_b0_create(); NULL is allowed. */
static void hw_atl_b0_destroy(struct aq_hw_s *self)
{
kfree(self);
}
/* Full soft reset of the B0 datapath, followed by a firmware (MPI) reset.
 * NOTE: the AQ_HW_WAIT_FOR polling macro writes the local `err`, which is
 * why the `if (err < 0)` checks after it are meaningful.
 */
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
/* Keep global registers accessible across the reset while allowing the
 * PCI/Rx/Tx domains to be reset.
 */
glb_glb_reg_res_dis_set(self, 1U);
pci_pci_reg_res_dis_set(self, 0U);
rx_rx_reg_res_dis_set(self, 0U);
tx_tx_reg_res_dis_set(self, 0U);
HW_ATL_FLUSH();
glb_soft_res_set(self, 1);
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
itr_irq_reg_res_dis_set(self, 0U);
itr_res_irq_set(self, 1U);
/* check 10 times by 1ms */
AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
err = aq_hw_err_from_flags(self);
err_exit:
return err;
}
/* Static QoS configuration for a single traffic class (TC 0): Tx scheduler
 * rates/credits/weights, Tx/Rx packet-buffer sizes with high/low fill
 * thresholds at 66%/50%, Rx XOFF per flow-control policy, and the 802.1p
 * priority-to-TC map (all priorities -> TC 0).
 */
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
u32 tc = 0U;
u32 buff_size = 0U;
unsigned int i_priority = 0U;
bool is_rx_flow_control = false;
/* TPS Descriptor rate init */
tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
/* TPS VM init */
tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
/* Tx buf size */
buff_size = HW_ATL_B0_TXBUF_MAX;
tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
/* Thresholds are in 32-byte units: 66% high / 50% low watermarks. */
tpb_tx_buff_hi_threshold_per_tc_set(self,
(buff_size * (1024 / 32U) * 66U) /
100U, tc);
tpb_tx_buff_lo_threshold_per_tc_set(self,
(buff_size * (1024 / 32U) * 50U) /
100U, tc);
/* QoS Rx buf size per TC */
tc = 0;
is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
buff_size = HW_ATL_B0_RXBUF_MAX;
rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
rpb_rx_buff_hi_threshold_per_tc_set(self,
(buff_size *
(1024U / 32U) * 66U) /
100U, tc);
rpb_rx_buff_lo_threshold_per_tc_set(self,
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
return aq_hw_err_from_flags(self);
}
/* Load the RSS hash key (10 x 32-bit words) into the device, or zeros when
 * RSS is disabled.  AQ_HW_WAIT_FOR writes the local `err`.
 * NOTE(review): `i` counts 9..0 while `addr` rises 0..9, so key words are
 * written in reverse order -- presumably matching the device's key layout;
 * confirm against the hardware reference.
 */
static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
struct aq_nic_cfg_s *cfg = NULL;
int err = 0;
unsigned int i = 0U;
unsigned int addr = 0U;
cfg = self->aq_nic_cfg;
for (i = 10, addr = 0U; i--; ++addr) {
u32 key_data = cfg->is_rss ?
__swab32(rss_params->hash_secret_key[i]) : 0U;
rpf_rss_key_wr_data_set(self, key_data);
rpf_rss_key_addr_set(self, addr);
rpf_rss_key_wr_en_set(self, 1U);
/* Wait for the device to latch the key word. */
AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
}
err = aq_hw_err_from_flags(self);
err_exit:
return err;
}
/* Program the RSS indirection table: pack a 3-bit queue index per entry
 * into a u16 bit array, then write it to the device word by word.
 * AQ_HW_WAIT_FOR writes the local `err`.
 */
static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
u8 *indirection_table = rss_params->indirection_table;
u32 i = 0U;
u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
int err = 0;
u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
memset(bitary, 0, sizeof(bitary));
for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
/* Entry i occupies 3 bits starting at bit (i*3) of the array;
 * the u32 access may straddle two u16 words.
 */
(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
((indirection_table[i] % num_rss_queues) <<
((i * 3U) & 0xFU));
}
for (i = AQ_DIMOF(bitary); i--;) {
rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
rpf_rss_redir_tbl_addr_set(self, i);
rpf_rss_redir_wr_en_set(self, 1U);
AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
if (err < 0)
goto err_exit;
}
err = aq_hw_err_from_flags(self);
err_exit:
return err;
}
/* Enable Tx/Rx checksum offloads and LSO, then configure the LRO engine
 * (max descriptors per session, timers, limits) per @aq_nic_cfg->is_lro.
 *
 * Returns an error translated from the accumulated hardware flags.
 * The original sprinkled `if (err < 0) goto err_exit;` checks on a local
 * that was never written before those tests -- dead branches, removed.
 */
static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
				    struct aq_nic_cfg_s *aq_nic_cfg)
{
	unsigned int i;

	/* TX checksums offloads*/
	tpo_ipv4header_crc_offload_en_set(self, 1);
	tpo_tcp_udp_crc_offload_en_set(self, 1);

	/* RX checksums offloads*/
	rpo_ipv4header_crc_offload_en_set(self, 1);
	rpo_tcp_udp_crc_offload_en_set(self, 1);

	/* LSO offloads*/
	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

	/* LRO offloads */
	{
		/* Encode the max number of descriptors per LRO session. */
		unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
			((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

		for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
			rpo_lro_max_num_of_descriptors_set(self, val, i);

		rpo_lro_time_base_divider_set(self, 0x61AU);
		rpo_lro_inactive_interval_set(self, 0);
		rpo_lro_max_coalescing_interval_set(self, 2);
		rpo_lro_qsessions_lim_set(self, 1U);
		rpo_lro_total_desc_lim_set(self, 2U);
		rpo_lro_patch_optimization_en_set(self, 0U);
		rpo_lro_min_pay_of_first_pkt_set(self, 10U);
		rpo_lro_pkt_lim_set(self, 1U);
		rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
	}

	return aq_hw_err_from_flags(self);
}
/* One-time Tx datapath init: LSO TCP flag templates for first/middle/last
 * segments, descriptor-writeback interrupts on, a TPO2-feature-dependent
 * write to register 0x7040, DCA off, and source-path SCP insertion on.
 */
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
/* Tx interrupts */
tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
0x00010000U : 0x00000000U);
tdm_tx_dca_en_set(self, 0U);
tdm_tx_dca_mode_set(self, 0U);
tpb_tx_path_scp_ins_en_set(self, 1U);
return aq_hw_err_from_flags(self);
}
/* One-time Rx datapath init: TC/RSS mode, flow control, L2/multicast/VLAN
 * filters, writeback interrupts, and a chip-feature-dependent misc write.
 */
static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int i;
/* Rx TC/RSS number config */
rpb_rpf_rx_traf_class_mode_set(self, 1U);
/* Rx flow control */
rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
/* Multicast filters: only slot 0 enabled initially. */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
rpfl2unicast_flr_act_set(self, 1U, i);
}
reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
/* Vlan filters */
rpf_vlan_outer_etht_set(self, 0x88A8U);
rpf_vlan_inner_etht_set(self, 0x8100U);
if (cfg->vlan_id) {
/* NOTE(review): the VLAN id is written to filter index 0 while
 * filter index 1 is the one enabled and given an action --
 * the indices look inconsistent; confirm the intended filter.
 */
rpf_vlan_flr_act_set(self, 1U, 0U);
rpf_vlan_id_flr_set(self, 0U, 0U);
rpf_vlan_flr_en_set(self, 0U, 0U);
rpf_vlan_accept_untagged_packets_set(self, 1U);
rpf_vlan_untagged_act_set(self, 1U);
rpf_vlan_flr_act_set(self, 1U, 1U);
rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
rpf_vlan_flr_en_set(self, 1U, 1U);
} else {
rpf_vlan_prom_mode_en_set(self, 1);
}
/* Rx Interrupts */
rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
/* misc */
aq_hw_write_reg(self, 0x00005040U,
IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
rpfl2broadcast_flr_act_set(self, 1U);
rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
rdm_rx_dca_en_set(self, 0U);
rdm_rx_dca_mode_set(self, 0U);
return aq_hw_err_from_flags(self);
}
/* Program the station MAC address into the primary unicast filter slot.
 * Returns -EINVAL for a NULL address, otherwise the accumulated HW error.
 */
static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
	unsigned int msw, lsw;

	if (!mac_addr)
		return -EINVAL;

	/* Split the 6-byte address: 2 high bytes / 4 low bytes. */
	msw = (mac_addr[0] << 8) | (mac_addr[1]);
	lsw = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	      (mac_addr[4] << 8) | mac_addr[5];

	/* Disable the slot while its address is rewritten. */
	rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
	rpfl2unicast_dest_addresslsw_set(self, lsw, HW_ATL_B0_MAC);
	rpfl2unicast_dest_addressmsw_set(self, msw, HW_ATL_B0_MAC);
	rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);

	return aq_hw_err_from_flags(self);
}
/* Bring the B0 device to an operational state: datapath init, MAC address,
 * firmware link-speed request, QoS and RSS setup, then interrupt routing
 * selected by IRQ type and single- vs multi-vector operation.
 */
static int hw_atl_b0_hw_init(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg,
u8 *mac_addr)
{
/* Global IRQ control values indexed by [irq_type][vecs > 1]. */
static u32 aq_hw_atl_igcr_table_[4][2] = {
{ 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
{ 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
{ 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
{ 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
};
int err = 0;
self->aq_nic_cfg = aq_nic_cfg;
hw_atl_utils_hw_chip_features_init(self,
&PHAL_ATLANTIC_B0->chip_features);
hw_atl_b0_hw_init_tx_path(self);
hw_atl_b0_hw_init_rx_path(self);
hw_atl_b0_hw_mac_addr_set(self, mac_addr);
hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
hw_atl_b0_hw_qos_set(self);
hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
err = aq_hw_err_from_flags(self);
if (err < 0)
goto err_exit;
/* Interrupts */
reg_irq_glb_ctl_set(self,
aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
[(aq_nic_cfg->vecs > 1U) ?
1 : 0]);
itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
/* Interrupts */
reg_gen_irq_map_set(self,
((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
err_exit:
return err;
}
static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
tdm_tx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
rdm_rx_desc_en_set(self, 1, ring->idx);
return aq_hw_err_from_flags(self);
}
/* Open the datapath: enable the Tx and Rx packet buffers. */
static int hw_atl_b0_hw_start(struct aq_hw_s *self)
{
	tpb_tx_buff_en_set(self, 1);
	rpb_rx_buff_en_set(self, 1);

	return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
return 0;
}
/* Build @frags hardware Tx descriptors from the ring's buffer metadata:
 * an LSO context descriptor (TXC) when buff->is_txc is set, otherwise data
 * descriptors carrying length, checksum and EOP/writeback flags.  Finally
 * kicks the hardware tail pointer.
 */
static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
struct aq_ring_s *ring,
unsigned int frags)
{
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_txd_s *txd = NULL;
unsigned int buff_pa_len = 0U;
unsigned int pkt_len = 0U;
unsigned int frag_count = 0U;
bool is_gso = false;
buff = &ring->buff_ring[ring->sw_tail];
pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
for (frag_count = 0; frag_count < frags; frag_count++) {
txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
HW_ATL_B0_TXD_SIZE];
txd->ctl = 0;
txd->ctl2 = 0;
txd->buf_addr = 0;
buff = &ring->buff_ring[ring->sw_tail];
if (buff->is_txc) {
/* LSO context descriptor: carries MSS and header lengths
 * instead of a data buffer.
 * NOTE(review): (buff->len_l3 << 31) keeps only bit 0 of
 * len_l3 in the u32 -- presumably an odd-length flag;
 * confirm against the descriptor format spec.
 */
txd->ctl |= (buff->len_l3 << 31) |
(buff->len_l2 << 24) |
HW_ATL_B0_TXD_CTL_CMD_TCP |
HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
txd->ctl2 |= (buff->mss << 16) |
(buff->len_l4 << 8) |
(buff->len_l3 >> 1);
/* Payload length excludes the L2/L3/L4 headers. */
pkt_len -= (buff->len_l4 +
buff->len_l3 +
buff->len_l2);
is_gso = true;
} else {
buff_pa_len = buff->len;
txd->buf_addr = buff->pa;
txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
((u32)buff_pa_len << 4));
txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
/* PAY_LEN */
txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);
if (is_gso) {
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
}
/* Tx checksum offloads */
if (buff->is_ip_cso)
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;
if (buff->is_udp_cso || buff->is_tcp_cso)
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;
if (unlikely(buff->is_eop)) {
txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
}
}
ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
}
hw_atl_b0_hw_tx_ring_tail_update(self, ring);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
rdm_rx_desc_en_set(self, false, aq_ring->idx);
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
aq_ring->idx);
reg_rx_dma_desc_base_addressmswset(self,
dma_desc_addr_msw, aq_ring->idx);
rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
rdm_rx_desc_data_buff_size_set(self,
AQ_CFG_RX_FRAME_MAX / 1024U,
aq_ring->idx);
rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
/* Rx ring set mode */
/* Mapping interrupt vector */
itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
itr_irq_map_en_rx_set(self, true, aq_ring->idx);
rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
struct aq_ring_s *aq_ring,
struct aq_ring_param_s *aq_ring_param)
{
u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
aq_ring->idx);
reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
aq_ring->idx);
tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
/* Set Tx threshold */
tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
/* Mapping interrupt vector */
itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
itr_irq_map_en_tx_set(self, true, aq_ring->idx);
tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
return aq_hw_err_from_flags(self);
}
/* Publish refilled Rx buffers to hardware: write each buffer's DMA address
 * into its descriptor, then advance the device tail pointer to sw_tail.
 */
static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
				     struct aq_ring_s *ring,
				     unsigned int sw_tail_old)
{
	unsigned int dx;

	for (dx = sw_tail_old; dx != ring->sw_tail;
	     dx = aq_ring_next_dx(ring, dx)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)
			&ring->dx_ring[dx * HW_ATL_B0_RXD_SIZE];
		struct aq_ring_buff_s *buff = &ring->buff_ring[dx];

		rxd->buf_addr = buff->pa;
		rxd->hdr_addr = 0U;	/* header split is not used */
	}

	/* dx == ring->sw_tail here, same value the original wrote. */
	reg_rx_dma_desc_tail_ptr_set(self, dx, ring->idx);

	return aq_hw_err_from_flags(self);
}
/* Refresh ring->hw_head from the device's Tx descriptor head register.
 * Returns -ENXIO when the device is flagged as unplugged (the read-back
 * value cannot be trusted then), otherwise the accumulated HW error state.
 */
static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{
	unsigned int head = tdm_tx_desc_head_ptr_get(self, ring->idx);

	if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG))
		return -ENXIO;

	ring->hw_head = head;

	return aq_hw_err_from_flags(self);
}
/* Harvest completed Rx descriptors on B0 silicon: unmap each finished
 * buffer and decode its writeback word into checksum, RSS, LRO/jumbo
 * chaining and length metadata for the ring layer.
 */
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
struct device *ndev = aq_nic_get_dev(ring->aq_nic);
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL;
struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
&ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
unsigned int is_err = 1U;
unsigned int is_rx_check_sum_enabled = 0U;
unsigned int pkt_type = 0U;
/* Bit 0 of status is the descriptor-done flag. */
if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
break;
}
buff = &ring->buff_ring[ring->hw_head];
/* Status bits 2..5 are treated as error flags below. */
is_err = (0x0000003CU & rxd_wb->status);
is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
is_err &= ~0x20U; /* exclude validity bit */
pkt_type = 0xFFU & (rxd_wb->type >> 4);
if (is_rx_check_sum_enabled) {
/* pkt_type bits 0..1: L3 protocol; bits 2..4: L4 protocol.
 * NOTE(review): buff->is_cso_err is read here but not set
 * in this function (unlike the A0 path, which derives it
 * from is_err) -- confirm who populates it.
 */
if (0x0U == (pkt_type & 0x3U))
buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
if (0x4U == (pkt_type & 0x1CU))
buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
else if (0x0U == (pkt_type & 0x1CU))
buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
}
/* Checksum bits were consumed above; drop them from is_err. */
is_err &= ~0x18U;
dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
if (is_err || rxd_wb->type & 0x1000U) {
/* status error or DMA error */
buff->is_error = 1U;
} else {
if (self->aq_nic_cfg->is_rss) {
/* last 4 byte */
u16 rss_type = rxd_wb->type & 0xFU;
if (rss_type && rss_type < 0x8U) {
buff->is_hash_l4 = (rss_type == 0x4 ||
rss_type == 0x5);
buff->rss_hash = rxd_wb->rss_hash;
}
}
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
/* End of packet: pkt_len is masked to the frame
 * size; 0 encodes exactly AQ_CFG_RX_FRAME_MAX.
 */
buff->len = (rxd_wb->pkt_len &
(AQ_CFG_RX_FRAME_MAX - 1U));
buff->len = buff->len ?
buff->len : AQ_CFG_RX_FRAME_MAX;
buff->next = 0U;
buff->is_eop = 1U;
} else {
if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
rxd_wb->status) {
/* LRO */
buff->next = rxd_wb->next_desc_ptr;
++ring->stats.rx.lro_packets;
} else {
/* jumbo */
buff->next =
aq_ring_next_dx(ring,
ring->hw_head);
++ring->stats.rx.jumbo_packets;
}
}
}
}
return aq_hw_err_from_flags(self);
}
/* Unmask the requested interrupt lines (low 32 bits of @mask). */
static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
	u32 lsw = LODWORD(mask);

	itr_irq_msk_setlsw_set(self, lsw);

	return aq_hw_err_from_flags(self);
}
/* Mask and acknowledge the requested interrupt lines (low 32 bits). */
static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
	u32 lsw = LODWORD(mask);

	itr_irq_msk_clearlsw_set(self, lsw);
	itr_irq_status_clearlsw_set(self, lsw);

	/* Unconditionally bump the HAL's dpc counter. */
	atomic_inc(&PHAL_ATLANTIC_B0->dpc);

	return aq_hw_err_from_flags(self);
}
/* Report the currently asserted interrupt lines (low 32 bits only). */
static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
	u64 status = itr_irq_statuslsw_get(self);

	*mask = status;

	return aq_hw_err_from_flags(self);
}
#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
unsigned int i = 0U;
rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
rpfl2multicast_flr_en_set(self,
IS_FILTER_ENABLED(IFF_MULTICAST), 0);
rpfl2_accept_all_mc_packets_set(self,
IS_FILTER_ENABLED(IFF_ALLMULTI));
rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
return aq_hw_err_from_flags(self);
}
#undef IS_FILTER_ENABLED
/* Program @count MAC addresses from @ar_mac into the unicast filter slots
 * starting at HW_ATL_B0_MAC_MIN, updating aq_nic_cfg->mc_list_count as each
 * entry is written.  Returns -EBADRQC if @count exceeds the available slots.
 */
static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
					   u8 ar_mac
					   [AQ_CFG_MULTICAST_ADDRESS_MAX]
					   [ETH_ALEN],
					   u32 count)
{
	int err = 0;

	if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
		err = -EBADRQC;
		goto err_exit;
	}
	for (self->aq_nic_cfg->mc_list_count = 0U;
	     self->aq_nic_cfg->mc_list_count < count;
	     ++self->aq_nic_cfg->mc_list_count) {
		u32 i = self->aq_nic_cfg->mc_list_count;
		/* MAC bytes 0-1 form the 16-bit MSW register value, bytes 2-5
		 * the 32-bit LSW value.
		 */
		u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
		u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
			(ar_mac[i][4] << 8) | ar_mac[i][5];

		/* disable the slot while its address is rewritten */
		rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);

		rpfl2unicast_dest_addresslsw_set(self,
						 l, HW_ATL_B0_MAC_MIN + i);

		rpfl2unicast_dest_addressmsw_set(self,
						 h, HW_ATL_B0_MAC_MIN + i);

		rpfl2_uc_flr_en_set(self,
				    (self->aq_nic_cfg->is_mc_list_enabled),
				    HW_ATL_B0_MAC_MIN + i);
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
/* Configure TX/RX interrupt moderation.
 *
 * When enabled and aq_nic_cfg->itr holds an explicit value, derive min/max
 * timers from it; the sentinel 0xFFFF selects per-link-speed timer tables
 * instead.  When disabled, moderation is turned off and per-descriptor
 * write-back interrupts are re-enabled.  The resulting control words are
 * written to every ring's moderation register.
 */
static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
						 bool itr_enabled)
{
	unsigned int i = 0U;

	if (itr_enabled && self->aq_nic_cfg->itr) {
		tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
		tdm_tdm_intr_moder_en_set(self, 1U);
		rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
		rdm_rdm_intr_moder_en_set(self, 1U);

		/* 2U: base control value; min/max timers are OR-ed into
		 * bits 8..15 and 16..24 below.
		 */
		PHAL_ATLANTIC_B0->itr_tx = 2U;
		PHAL_ATLANTIC_B0->itr_rx = 2U;

		if (self->aq_nic_cfg->itr != 0xFFFFU) {
			/* explicit itr value: scale into timer ranges */
			unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
			unsigned int min_timer = self->aq_nic_cfg->itr / 32U;

			max_timer = min(0x1FFU, max_timer);
			min_timer = min(0xFFU, min_timer);

			PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
			PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
			PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
			PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
		} else {
			/* auto mode: {min, max} timer pairs per link speed,
			 * indexed by hw_atl_utils_mbps_2_speed_index()
			 */
			static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
				{0xffU, 0xffU}, /* 10Gbit */
				{0xffU, 0x1ffU}, /* 5Gbit */
				{0xffU, 0x1ffU}, /* 5Gbit 5GS */
				{0xffU, 0x1ffU}, /* 2.5Gbit */
				{0xffU, 0x1ffU}, /* 1Gbit */
				{0xffU, 0x1ffU}, /* 100Mbit */
			};

			static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
				{0x6U, 0x38U},/* 10Gbit */
				{0xCU, 0x70U},/* 5Gbit */
				{0xCU, 0x70U},/* 5Gbit 5GS */
				{0x18U, 0xE0U},/* 2.5Gbit */
				{0x30U, 0x80U},/* 1Gbit */
				{0x4U, 0x50U},/* 100Mbit */
			};

			unsigned int speed_index =
					hw_atl_utils_mbps_2_speed_index(
						self->aq_link_status.mbps);

			PHAL_ATLANTIC_B0->itr_tx |=
				hw_atl_b0_timers_table_tx_[speed_index]
				[0] << 0x8U; /* set min timer value */
			PHAL_ATLANTIC_B0->itr_tx |=
				hw_atl_b0_timers_table_tx_[speed_index]
				[1] << 0x10U; /* set max timer value */

			PHAL_ATLANTIC_B0->itr_rx |=
				hw_atl_b0_timers_table_rx_[speed_index]
				[0] << 0x8U; /* set min timer value */
			PHAL_ATLANTIC_B0->itr_rx |=
				hw_atl_b0_timers_table_rx_[speed_index]
				[1] << 0x10U; /* set max timer value */
		}
	} else {
		/* moderation off: interrupt on every descriptor write-back */
		tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
		tdm_tdm_intr_moder_en_set(self, 0U);
		rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
		rdm_rdm_intr_moder_en_set(self, 0U);
		PHAL_ATLANTIC_B0->itr_tx = 0U;
		PHAL_ATLANTIC_B0->itr_rx = 0U;
	}

	/* apply the computed control words to all rings */
	for (i = HW_ATL_B0_RINGS_MAX; i--;) {
		reg_tx_intr_moder_ctrl_set(self,
					   PHAL_ATLANTIC_B0->itr_tx, i);
		reg_rx_intr_moder_ctrl_set(self,
					   PHAL_ATLANTIC_B0->itr_rx, i);
	}

	return aq_hw_err_from_flags(self);
}
/* Stop the hardware by masking every interrupt line. */
static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
	hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
	return aq_hw_err_from_flags(self);
}
/* Disable descriptor processing on TX ring @ring. */
static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	tdm_tx_desc_en_set(self, 0U, ring->idx);
	return aq_hw_err_from_flags(self);
}
/* Disable descriptor processing on RX ring @ring. */
static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
				     struct aq_ring_s *ring)
{
	rdm_rx_desc_en_set(self, 0U, ring->idx);
	return aq_hw_err_from_flags(self);
}
/* Ask the MPI firmware to set the link speed to @speed.
 *
 * Returns 0 on success or the negative error code from the MPI layer.
 * The previous `if (err < 0) goto err_exit;` jumped to a label placed
 * immediately after it, so the goto added nothing; return directly.
 */
static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
{
	return hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
}
/* Atlantic B0 hardware-operations table, exported to the generic NIC layer
 * through hw_atl_b0_get_ops_by_id().
 */
static struct aq_hw_ops hw_atl_ops_ = {
	.create               = hw_atl_b0_create,
	.destroy              = hw_atl_b0_destroy,
	.get_hw_caps          = hw_atl_b0_get_hw_caps,

	.hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
	.hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
	.hw_get_link_status   = hw_atl_utils_mpi_get_link_status,
	.hw_set_link_speed    = hw_atl_b0_hw_set_speed,
	.hw_init              = hw_atl_b0_hw_init,
	.hw_deinit            = hw_atl_utils_hw_deinit,
	.hw_set_power         = hw_atl_utils_hw_set_power,
	.hw_reset             = hw_atl_b0_hw_reset,
	.hw_start             = hw_atl_b0_hw_start,
	.hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
	.hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
	.hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
	.hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
	.hw_stop              = hw_atl_b0_hw_stop,

	.hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
	.hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,

	.hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
	.hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,

	.hw_irq_enable           = hw_atl_b0_hw_irq_enable,
	.hw_irq_disable          = hw_atl_b0_hw_irq_disable,
	.hw_irq_read             = hw_atl_b0_hw_irq_read,

	.hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
	.hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
	.hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
	.hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
	.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
	.hw_rss_set                  = hw_atl_b0_hw_rss_set,
	.hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
	.hw_get_regs                 = hw_atl_utils_hw_get_regs,
	.hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
	.hw_get_fw_version           = hw_atl_utils_get_fw_version,
};
/* Return the B0 ops table when @pdev is a supported aQuantia device
 * (known device ID, hardware revision 2); NULL otherwise.
 */
struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
{
	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
		return NULL;

	if (pdev->revision != 2U)
		return NULL;

	switch (pdev->device) {
	case HW_ATL_DEVICE_ID_0001:
	case HW_ATL_DEVICE_ID_D100:
	case HW_ATL_DEVICE_ID_D107:
	case HW_ATL_DEVICE_ID_D108:
	case HW_ATL_DEVICE_ID_D109:
		return &hw_atl_ops_;
	default:
		return NULL;
	}
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
* specific functions.
*/
#ifndef HW_ATL_B0_H
#define HW_ATL_B0_H

#include "../aq_common.h"

/* Fallback PCI IDs, used only when not already provided by pci_ids.h */
#ifndef PCI_VENDOR_ID_AQUANTIA

#define PCI_VENDOR_ID_AQUANTIA  0x1D6A
#define HW_ATL_DEVICE_ID_0001   0x0001
#define HW_ATL_DEVICE_ID_D100   0xD100
#define HW_ATL_DEVICE_ID_D107   0xD107
#define HW_ATL_DEVICE_ID_D108   0xD108
#define HW_ATL_DEVICE_ID_D109   0xD109

#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"

#endif

/* Return the B0 hardware-operations table for a supported @pdev, or NULL. */
struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);

#endif /* HW_ATL_B0_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
* constants.
*/
#ifndef HW_ATL_B0_INTERNAL_H
#define HW_ATL_B0_INTERNAL_H

#include "../aq_common.h"

/* MTU and ring geometry */
#define HW_ATL_B0_MTU_JUMBO (16000U)
#define HW_ATL_B0_MTU        1514U

#define HW_ATL_B0_TX_RINGS 4U
#define HW_ATL_B0_RX_RINGS 4U

#define HW_ATL_B0_RINGS_MAX 32U
#define HW_ATL_B0_TXD_SIZE  (16U)
#define HW_ATL_B0_RXD_SIZE  (16U)

/* unicast filter slot layout: slot 0 is the own MAC, 1..32 are filters */
#define HW_ATL_B0_MAC      0U
#define HW_ATL_B0_MAC_MIN  1U
#define HW_ATL_B0_MAC_MAX  33U

/* UCAST/MCAST filters */
#define HW_ATL_B0_UCAST_FILTERS_MAX 38
#define HW_ATL_B0_MCAST_FILTERS_MAX 8

/* interrupts */
#define HW_ATL_B0_ERR_INT 8U
#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU)

/* TX descriptor ctl2 bit layout */
#define HW_ATL_B0_TXD_CTL2_LEN        (0xFFFFC000)
#define HW_ATL_B0_TXD_CTL2_CTX_EN     (0x00002000)
#define HW_ATL_B0_TXD_CTL2_CTX_IDX    (0x00001000)

/* TX descriptor ctl bit layout */
#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD   (0x00000001)
#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC   (0x00000002)
#define HW_ATL_B0_TXD_CTL_BLEN        (0x000FFFF0)
#define HW_ATL_B0_TXD_CTL_DD          (0x00100000)
#define HW_ATL_B0_TXD_CTL_EOP         (0x00200000)

#define HW_ATL_B0_TXD_CTL_CMD_X       (0x3FC00000)

#define HW_ATL_B0_TXD_CTL_CMD_VLAN    BIT(22)
#define HW_ATL_B0_TXD_CTL_CMD_FCS     BIT(23)
#define HW_ATL_B0_TXD_CTL_CMD_IPCSO   BIT(24)
#define HW_ATL_B0_TXD_CTL_CMD_TUCSO   BIT(25)
#define HW_ATL_B0_TXD_CTL_CMD_LSO     BIT(26)
#define HW_ATL_B0_TXD_CTL_CMD_WB      BIT(27)
#define HW_ATL_B0_TXD_CTL_CMD_VXLAN   BIT(28)

#define HW_ATL_B0_TXD_CTL_CMD_IPV6    BIT(21)
#define HW_ATL_B0_TXD_CTL_CMD_TCP     BIT(22)

/* MPI (firmware mailbox) registers */
#define HW_ATL_B0_MPI_CONTROL_ADR  0x0368U
#define HW_ATL_B0_MPI_STATE_ADR    0x036CU

#define HW_ATL_B0_MPI_SPEED_MSK    0xFFFFU
#define HW_ATL_B0_MPI_SPEED_SHIFT  16U

/* link rate bits */
#define HW_ATL_B0_RATE_10G    BIT(0)
#define HW_ATL_B0_RATE_5G     BIT(1)
#define HW_ATL_B0_RATE_2G5    BIT(3)
#define HW_ATL_B0_RATE_1G     BIT(4)
#define HW_ATL_B0_RATE_100M   BIT(5)

#define HW_ATL_B0_TXBUF_MAX  160U
#define HW_ATL_B0_RXBUF_MAX  320U

/* RSS */
#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
#define HW_ATL_B0_RSS_HASHKEY_BITS 320U

#define HW_ATL_B0_TCRSS_4_8  1
#define HW_ATL_B0_TC_MAX 1U
#define HW_ATL_B0_RSS_MAX 8U

#define HW_ATL_B0_LRO_RXD_MAX 2U
#define HW_ATL_B0_RS_SLIP_ENABLED  0U

/* (256k -1(max pay_len) - 54(header)) */
#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U

/* (256k -1(max pay_len) - 74(header)) */
#define HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U

#define HW_ATL_B0_CHIP_REVISION_B0      0xA0U
#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU

#define HW_ATL_B0_FW_SEMA_RAM           0x2U

/* TX context descriptor bit layout */
#define HW_ATL_B0_TXC_LEN_TUNLEN    (0x0000FF00)
#define HW_ATL_B0_TXC_LEN_OUTLEN    (0xFFFF0000)

#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007)
#define HW_ATL_B0_TXC_CTL_CTX_ID    (0x00000008)
#define HW_ATL_B0_TXC_CTL_VLAN      (0x000FFFF0)
#define HW_ATL_B0_TXC_CTL_CMD       (0x00F00000)
#define HW_ATL_B0_TXC_CTL_L2LEN     (0x7F000000)

#define HW_ATL_B0_TXC_CTL_L3LEN     (0x80000000)	/* L3LEN lsb */
#define HW_ATL_B0_TXC_LEN2_L3LEN    (0x000000FF)	/* L3LE upper bits */
#define HW_ATL_B0_TXC_LEN2_L4LEN    (0x0000FF00)
#define HW_ATL_B0_TXC_LEN2_MSSLEN   (0xFFFF0000)

/* RX descriptor / write-back bit layout */
#define HW_ATL_B0_RXD_DD    (0x1)
#define HW_ATL_B0_RXD_NCEA0 (0x1)

#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F)
#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0)
#define HW_ATL_B0_RXD_WB_STAT_RXCTRL  (0x00180000)
#define HW_ATL_B0_RXD_WB_STAT_SPLHDR  (0x00200000)
#define HW_ATL_B0_RXD_WB_STAT_HDRLEN  (0xFFC00000)

#define HW_ATL_B0_RXD_WB_STAT2_DD      (0x0001)
#define HW_ATL_B0_RXD_WB_STAT2_EOP     (0x0002)
#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT  (0x003C)
#define HW_ATL_B0_RXD_WB_STAT2_MACERR  (0x0004)
#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR  (0x0008)
#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR  (0x0010)
#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0)
#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT  (0xF000)

#define L2_FILTER_ACTION_DISCARD (0x0)
#define L2_FILTER_ACTION_HOST    (0x1)

#define HW_ATL_B0_UCP_0X370_REG  (0x370)

/* dummy register read used as a posting/flush barrier */
#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10)

#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
/* Hardware tx descriptor */
struct __packed hw_atl_txd_s {
	u64 buf_addr;
	u32 ctl;
	u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
};

/* Hardware tx context descriptor */
struct __packed hw_atl_txc_s {
	u32 rsvd;
	u32 len;
	u32 ctl;
	u32 len2;
};

/* Hardware rx descriptor */
struct __packed hw_atl_rxd_s {
	u64 buf_addr;
	u64 hdr_addr;
};

/* Hardware rx descriptor writeback */
struct __packed hw_atl_rxd_wb_s {
	u32 type;
	u32 rss_hash;
	u16 status;
	u16 pkt_len;
	u16 next_desc_ptr;
	u16 vlan;
};
/* HW layer capabilities */
static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
	.ports = 1U,
	.is_64_dma = true,
	.msix_irqs = 4U,
	.irq_mask = ~0U,
	.vecs = HW_ATL_B0_RSS_MAX,
	.tcs = HW_ATL_B0_TC_MAX,
	.rxd_alignment = 1U,
	.rxd_size = HW_ATL_B0_RXD_SIZE,
	.rxds = 8U * 1024U,
	.txd_alignment = 1U,
	.txd_size = HW_ATL_B0_TXD_SIZE,
	.txds = 8U * 1024U,
	.txhwb_alignment = 4096U,
	.tx_rings = HW_ATL_B0_TX_RINGS,
	.rx_rings = HW_ATL_B0_RX_RINGS,
	.hw_features = NETIF_F_HW_CSUM |
			NETIF_F_RXHASH |
			NETIF_F_SG |
			NETIF_F_TSO |
			NETIF_F_LRO,
	.hw_priv_flags = IFF_UNICAST_FLT,
	.link_speed_msk = (HW_ATL_B0_RATE_10G |
			HW_ATL_B0_RATE_5G |
			HW_ATL_B0_RATE_2G5 |
			HW_ATL_B0_RATE_1G |
			HW_ATL_B0_RATE_100M),
	.flow_control = true,
	.mtu = HW_ATL_B0_MTU_JUMBO,
	.mac_regs_count = 88,
	.fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED,
};

#endif /* HW_ATL_B0_INTERNAL_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
* Atlantic registers.
*/
#include "hw_atl_llh.h"
#include "hw_atl_llh_internal.h"
#include "../aq_hw_utils.h"
/* global */

/* Write @glb_cpu_sem to CPU semaphore register @semaphore. */
void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
{
	aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
}

/* Read CPU semaphore register @semaphore. */
u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
{
	return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
}

/* Set the global "register reset disable" bitfield. */
void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
{
	aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
			    glb_reg_res_dis_msk,
			    glb_reg_res_dis_shift,
			    glb_reg_res_dis);
}

/* Set the global soft-reset bit. */
void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
{
	aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
			    glb_soft_res_shift, soft_res);
}

/* Read back the global soft-reset bit. */
u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
				  glb_soft_res_msk,
				  glb_soft_res_shift);
}

/* Read RX DMA statistics counter 7. */
u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
}

/* Read the global MIF identification register. */
u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
}
/* stats
 *
 * The good octet/packet DMA counters are 64-bit values exposed as separate
 * LSW/MSW 32-bit register reads.
 */
u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
}

u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
}

u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
}

u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
}

u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
}

u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
}

u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
}

u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
}

u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
}
/* interrupt */

/* Write the low 32 bits of the interrupt auto-mask register. */
void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
{
	aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
}
/* Set the imr_rx{r}_en bit for rx queue @rx.
 *
 * Each 32-bit mapping register at 0x2100 + 4 * (rx / 2) covers two rx
 * queues: even queues use bit 15, odd queues use bit 7.  Computing the
 * address/mask/shift replaces the original 32-entry lookup tables with
 * identical values.
 */
void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
{
	u32 addr = 0x00002100U + (rx / 2U) * 4U;
	u32 shift = (rx & 1U) ? 7U : 15U;

	aq_hw_write_reg_bit(aq_hw, addr, 1U << shift, shift, irq_map_en_rx);
}
/* Set the imr_tx{t}_en bit for tx queue @tx.
 *
 * Each 32-bit mapping register at 0x2100 + 4 * (tx / 2) covers two tx
 * queues: even queues use bit 31, odd queues use bit 23.  Computing the
 * address/mask/shift replaces the original 32-entry lookup tables with
 * identical values.
 */
void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
{
	u32 addr = 0x00002100U + (tx / 2U) * 4U;
	u32 shift = (tx & 1U) ? 23U : 31U;

	aq_hw_write_reg_bit(aq_hw, addr, 1U << shift, shift, irq_map_en_tx);
}
/* Set the 5-bit imr_rx{r}[4:0] vector field for rx queue @rx.
 *
 * Each 32-bit mapping register at 0x2100 + 4 * (rx / 2) covers two rx
 * queues: the even queue's field sits at bits 8..12, the odd queue's at
 * bits 0..4.  Computing the address/mask/shift replaces the original
 * 32-entry lookup tables with identical values.
 */
void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
{
	u32 addr = 0x00002100U + (rx / 2U) * 4U;
	u32 shift = (rx & 1U) ? 0U : 8U;

	aq_hw_write_reg_bit(aq_hw, addr, 0x1fU << shift, shift, irq_map_rx);
}
/* Set the 5-bit imr_tx{t}[4:0] vector field for tx queue @tx.
 *
 * Each 32-bit mapping register at 0x2100 + 4 * (tx / 2) covers two tx
 * queues: the even queue's field sits at bits 24..28, the odd queue's at
 * bits 16..20.  Computing the address/mask/shift replaces the original
 * 32-entry lookup tables with identical values.
 */
void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
{
	u32 addr = 0x00002100U + (tx / 2U) * 4U;
	u32 shift = (tx & 1U) ? 16U : 24U;

	aq_hw_write_reg_bit(aq_hw, addr, 0x1fU << shift, shift, irq_map_tx);
}
/* Clear interrupt mask bits (low 32) — disables the selected lines. */
void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
{
	aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
}

/* Set interrupt mask bits (low 32) — enables the selected lines. */
void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
{
	aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
}

/* Set the interrupt-block register-reset-disable bit. */
void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
{
	aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
			    itr_reg_res_dsbl_msk,
			    itr_reg_res_dsbl_shift, irq_reg_res_dis);
}

/* Clear pending interrupt status bits (low 32). */
void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
				 u32 irq_status_clearlsw)
{
	aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
}

/* Read the pending interrupt status bits (low 32). */
u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
}

/* Read the interrupt-block reset bit. */
u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
				  itr_res_shift);
}

/* Write the interrupt-block reset bit. */
void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
{
	aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
			    itr_res_shift, res_irq);
}
/* rdm: RX DMA engine bitfield accessors */

/* Set the DCA CPU id for RX DCA channel @dca. */
void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
	aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
			    rdm_dcadcpuid_msk,
			    rdm_dcadcpuid_shift, cpuid);
}

/* Globally enable/disable RX DCA. */
void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
{
	aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
			    rdm_dca_en_shift, rx_dca_en);
}

/* Select the RX DCA mode. */
void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
{
	aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
			    rdm_dca_mode_shift, rx_dca_mode);
}

/* Set the data buffer size for RX descriptor ring @descriptor. */
void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
				    u32 rx_desc_data_buff_size, u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
			    rdm_descddata_size_msk,
			    rdm_descddata_size_shift,
			    rx_desc_data_buff_size);
}

/* Enable/disable descriptor DCA for channel @dca. */
void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
{
	aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
			    rdm_dcaddesc_en_msk,
			    rdm_dcaddesc_en_shift,
			    rx_desc_dca_en);
}

/* Enable/disable RX descriptor ring @descriptor. */
void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
			    rdm_descden_msk,
			    rdm_descden_shift,
			    rx_desc_en);
}

/* Set the header buffer size for RX descriptor ring @descriptor. */
void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
				    u32 rx_desc_head_buff_size, u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
			    rdm_descdhdr_size_msk,
			    rdm_descdhdr_size_shift,
			    rx_desc_head_buff_size);
}

/* Enable/disable header splitting for RX descriptor ring @descriptor. */
void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
				    u32 rx_desc_head_splitting, u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
			    rdm_descdhdr_split_msk,
			    rdm_descdhdr_split_shift,
			    rx_desc_head_splitting);
}

/* Read the head pointer of RX descriptor ring @descriptor. */
u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
	return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
				  rdm_descdhd_msk, rdm_descdhd_shift);
}

/* Set the length of RX descriptor ring @descriptor. */
void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
			    rdm_descdlen_msk, rdm_descdlen_shift,
			    rx_desc_len);
}

/* Assert/deassert reset on RX descriptor ring @descriptor. */
void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
			    rdm_descdreset_msk, rdm_descdreset_shift,
			    rx_desc_res);
}

/* Enable/disable the interrupt raised on RX descriptor write-back. */
void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
				  u32 rx_desc_wr_wb_irq_en)
{
	aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
			    rdm_int_desc_wrb_en_msk,
			    rdm_int_desc_wrb_en_shift,
			    rx_desc_wr_wb_irq_en);
}

/* Enable/disable header DCA for channel @dca. */
void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
{
	aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
			    rdm_dcadhdr_en_msk,
			    rdm_dcadhdr_en_shift,
			    rx_head_dca_en);
}

/* Enable/disable payload DCA for channel @dca. */
void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
{
	aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
			    rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
			    rx_pld_dca_en);
}

/* Enable/disable RX interrupt moderation in the RX DMA engine. */
void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
{
	aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
			    rdm_int_rim_en_msk,
			    rdm_int_rim_en_shift,
			    rdm_intr_moder_en);
}
/* reg: full-register accessors */

/* Write general interrupt mapping register @regidx. */
void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
{
	aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
}

/* Read the general interrupt status register. */
u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
}

/* Write the global interrupt control register. */
void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
{
	aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
}

/* Write interrupt throttle register @throttle. */
void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
{
	aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
}

/* Write the low 32 bits of RX ring @descriptor's DMA base address. */
void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
					u32 rx_dma_desc_base_addrlsw,
					u32 descriptor)
{
	aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
			rx_dma_desc_base_addrlsw);
}

/* Write the high 32 bits of RX ring @descriptor's DMA base address. */
void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
					u32 rx_dma_desc_base_addrmsw,
					u32 descriptor)
{
	aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
			rx_dma_desc_base_addrmsw);
}

/* Read the status register of RX ring @descriptor. */
u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
	return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
}

/* Write the tail pointer of RX ring @descriptor. */
void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
				  u32 rx_dma_desc_tail_ptr, u32 descriptor)
{
	aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
			rx_dma_desc_tail_ptr);
}

/* Write the RX multicast filter mask register. */
void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
{
	aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
}

/* Write RX multicast filter register @filter. */
void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
			     u32 filter)
{
	aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
}

/* Write RSS control register 1. */
void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
{
	aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
}

/* Write RX filter control register 2. */
void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
{
	aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
}

/* Write the RX interrupt moderation control word for @queue. */
void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
				u32 rx_intr_moderation_ctl,
				u32 queue)
{
	aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
			rx_intr_moderation_ctl);
}

/* Write the TX DMA debug control register. */
void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
{
	aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
}

/* Write the low 32 bits of TX ring @descriptor's DMA base address. */
void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
					u32 tx_dma_desc_base_addrlsw,
					u32 descriptor)
{
	aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
			tx_dma_desc_base_addrlsw);
}

/* Write the high 32 bits of TX ring @descriptor's DMA base address. */
void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
					u32 tx_dma_desc_base_addrmsw,
					u32 descriptor)
{
	aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
			tx_dma_desc_base_addrmsw);
}

/* Write the tail pointer of TX ring @descriptor. */
void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
				  u32 tx_dma_desc_tail_ptr, u32 descriptor)
{
	aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
			tx_dma_desc_tail_ptr);
}

/* Write the TX interrupt moderation control word for @queue. */
void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
				u32 tx_intr_moderation_ctl,
				u32 queue)
{
	aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
			tx_intr_moderation_ctl);
}
/* RPB: rx packet buffer */

/* Enable/disable DMA system loopback. */
void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
{
	aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
			    rpb_dma_sys_lbk_msk,
			    rpb_dma_sys_lbk_shift, dma_sys_lbk);
}

/* Select the RX traffic-class mode. */
void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
				    u32 rx_traf_class_mode)
{
	aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
			    rpb_rpf_rx_tc_mode_msk,
			    rpb_rpf_rx_tc_mode_shift,
			    rx_traf_class_mode);
}

/* Enable/disable the RX packet buffer. */
void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
{
	aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
			    rpb_rx_buf_en_shift, rx_buff_en);
}

/* Set the high watermark of RX buffer @buffer (per traffic class). */
void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
					 u32 rx_buff_hi_threshold_per_tc,
					 u32 buffer)
{
	aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
			    rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
			    rx_buff_hi_threshold_per_tc);
}

/* Set the low watermark of RX buffer @buffer (per traffic class). */
void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
					 u32 rx_buff_lo_threshold_per_tc,
					 u32 buffer)
{
	aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
			    rpb_rxblo_thresh_msk,
			    rpb_rxblo_thresh_shift,
			    rx_buff_lo_threshold_per_tc);
}

/* Select the RX flow-control mode. */
void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
{
	aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
			    rpb_rx_fc_mode_msk,
			    rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
}

/* Set the size of RX packet buffer @buffer (per traffic class). */
void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
				     u32 rx_pkt_buff_size_per_tc, u32 buffer)
{
	aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
			    rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
			    rx_pkt_buff_size_per_tc);
}

/* Enable/disable xoff generation for RX buffer @buffer. */
void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
			       u32 buffer)
{
	aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
			    rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
			    rx_xoff_en_per_tc);
}
/* rpf */
void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
u32 l2broadcast_count_threshold)
{
aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
rpfl2bc_thresh_msk,
rpfl2bc_thresh_shift,
l2broadcast_count_threshold);
}
void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
{
aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
rpfl2bc_en_shift, l2broadcast_en);
}
void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
{
aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
rpfl2bc_act_shift, l2broadcast_flr_act);
}
void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
rpfl2mc_enf_msk,
rpfl2mc_enf_shift, l2multicast_flr_en);
}
void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en)
{
aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
rpfl2promis_mode_msk,
rpfl2promis_mode_shift,
l2promiscuous_mode_en);
}
void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
rpfl2uc_actf_msk, rpfl2uc_actf_shift,
l2unicast_flr_act);
}
void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
rpfl2uc_enf_msk,
rpfl2uc_enf_shift, l2unicast_flr_en);
}
void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
u32 l2unicast_dest_addresslsw,
u32 filter)
{
aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
l2unicast_dest_addresslsw);
}
void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
u32 l2unicast_dest_addressmsw,
u32 filter)
{
aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
l2unicast_dest_addressmsw);
}
void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
u32 l2_accept_all_mc_packets)
{
aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
rpfl2mc_accept_all_msk,
rpfl2mc_accept_all_shift,
l2_accept_all_mc_packets);
}
/* Map user priority @tc to traffic class @user_priority_tc_map.
 * All eight rx_tc_up{t}[2:0] bitfields share register 0x54c4; the
 * per-index tables below select the mask/shift for the requested entry.
 * @tc must be in 0..7 — no bounds check is done here (caller contract).
 *
 * The lookup tables are read-only, so they are declared const to let
 * the compiler place them in .rodata and reject accidental writes.
 */
void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
				      u32 user_priority_tc_map, u32 tc)
{
	/* register address for bitfield rx_tc_up{t}[2:0] */
	static const u32 rpf_rpb_rx_tc_upt_adr[8] = {
		0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
		0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
	};
	/* bitmask for bitfield rx_tc_up{t}[2:0] */
	static const u32 rpf_rpb_rx_tc_upt_msk[8] = {
		0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
		0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
	};
	/* lower bit position of bitfield rx_tc_up{t}[2:0] */
	static const u32 rpf_rpb_rx_tc_upt_shft[8] = {
		0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
	};

	aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
			    rpf_rpb_rx_tc_upt_msk[tc],
			    rpf_rpb_rx_tc_upt_shft[tc],
			    user_priority_tc_map);
}
/* Select which RSS key word the next key read/write targets. */
void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
{
	aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
			    rpf_rss_key_addr_msk,
			    rpf_rss_key_addr_shift,
			    rss_key_addr);
}

/* Load one 32-bit word of RSS key data into the write-data register. */
void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
{
	aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
			rss_key_wr_data);
}

/* Read back the RSS key write-enable strobe; callers poll this to
 * know when a previously-issued key write has completed.
 */
u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
				  rpf_rss_key_wr_eni_msk,
				  rpf_rss_key_wr_eni_shift);
}

/* Assert/deassert the RSS key write-enable strobe. */
void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
{
	aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
			    rpf_rss_key_wr_eni_msk,
			    rpf_rss_key_wr_eni_shift,
			    rss_key_wr_en);
}

/* Select which RSS redirection table entry the next write targets. */
void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
{
	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
			    rpf_rss_redir_addr_msk,
			    rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
}

/* Load the data word for the selected RSS redirection table entry. */
void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
				   u32 rss_redir_tbl_wr_data)
{
	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
			    rpf_rss_redir_wr_data_msk,
			    rpf_rss_redir_wr_data_shift,
			    rss_redir_tbl_wr_data);
}

/* Read back the RSS redirection-table write-enable strobe (poll for
 * write completion).
 */
u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
				  rpf_rss_redir_wr_eni_msk,
				  rpf_rss_redir_wr_eni_shift);
}

/* Assert/deassert the RSS redirection-table write-enable strobe. */
void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
{
	aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
			    rpf_rss_redir_wr_eni_msk,
			    rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
}
/* Enable/disable the TPO-to-RPF system loopback path. */
void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
{
	aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
			    rpf_tpo_rpf_sys_lbk_msk,
			    rpf_tpo_rpf_sys_lbk_shift,
			    tpo_to_rpf_sys_lbk);
}

/* Set the inner VLAN TPID (ethertype) the filter recognizes. */
void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
{
	aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
			    rpf_vl_inner_tpid_msk,
			    rpf_vl_inner_tpid_shift,
			    vlan_inner_etht);
}

/* Set the outer VLAN TPID (ethertype) the filter recognizes. */
void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
{
	aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
			    rpf_vl_outer_tpid_msk,
			    rpf_vl_outer_tpid_shift,
			    vlan_outer_etht);
}

/* Enable/disable VLAN promiscuous mode (accept any VLAN tag). */
void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
{
	aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
			    rpf_vl_promis_mode_msk,
			    rpf_vl_promis_mode_shift,
			    vlan_prom_mode_en);
}

/* Enable/disable acceptance of untagged packets. */
void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
					  u32 vlan_accept_untagged_packets)
{
	aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
			    rpf_vl_accept_untagged_mode_msk,
			    rpf_vl_accept_untagged_mode_shift,
			    vlan_accept_untagged_packets);
}

/* Set the action applied to untagged packets. */
void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
{
	aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
			    rpf_vl_untagged_act_msk,
			    rpf_vl_untagged_act_shift,
			    vlan_untagged_act);
}

/* Enable/disable VLAN filter @filter. */
void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
			    rpf_vl_en_f_msk,
			    rpf_vl_en_f_shift,
			    vlan_flr_en);
}

/* Set the action taken on a match of VLAN filter @filter. */
void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
			    rpf_vl_act_f_msk,
			    rpf_vl_act_f_shift,
			    vlan_flr_act);
}

/* Program the VLAN ID matched by VLAN filter @filter. */
void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
			    rpf_vl_id_f_msk,
			    rpf_vl_id_f_shift,
			    vlan_id_flr);
}

/* Enable/disable ethertype filter @filter. */
void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
			    rpf_et_enf_msk,
			    rpf_et_enf_shift, etht_flr_en);
}

/* Enable/disable user-priority matching for ethertype filter @filter. */
void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
				   u32 etht_user_priority_en, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
			    rpf_et_upfen_msk, rpf_et_upfen_shift,
			    etht_user_priority_en);
}

/* Enable/disable rx-queue steering for ethertype filter @filter. */
void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
			      u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
			    rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
			    etht_rx_queue_en);
}

/* Set the user priority matched by ethertype filter @filter. */
void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
				u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
			    rpf_et_upf_msk,
			    rpf_et_upf_shift, etht_user_priority);
}

/* Set the rx queue that ethertype filter @filter steers to. */
void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
			   u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
			    rpf_et_rxqf_msk,
			    rpf_et_rxqf_shift, etht_rx_queue);
}

/* Set the management queue for ethertype filter @filter. */
void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
			    u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
			    rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
			    etht_mgt_queue);
}

/* Set the action taken on a match of ethertype filter @filter. */
void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
			    rpf_et_actf_msk,
			    rpf_et_actf_shift, etht_flr_act);
}

/* Program the ethertype value matched by filter @filter. */
void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
{
	aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
			    rpf_et_valf_msk,
			    rpf_et_valf_shift, etht_flr);
}
/* RPO: rx packet offload */

/* Enable/disable IPv4 header checksum offload on receive. */
void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
				       u32 ipv4header_crc_offload_en)
{
	aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
			    rpo_ipv4chk_en_msk,
			    rpo_ipv4chk_en_shift,
			    ipv4header_crc_offload_en);
}

/* Enable/disable VLAN tag stripping on rx descriptor ring @descriptor. */
void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
				    u32 rx_desc_vlan_stripping, u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
			    rpo_descdvl_strip_msk,
			    rpo_descdvl_strip_shift,
			    rx_desc_vlan_stripping);
}

/* Enable/disable TCP/UDP checksum offload on receive. */
void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
				    u32 tcp_udp_crc_offload_en)
{
	aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
			    rpol4chk_en_shift, tcp_udp_crc_offload_en);
}

/* Enable/disable LRO; written as a full register, presumably a
 * per-ring enable bitmap — TODO confirm against the register spec.
 */
void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
{
	aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
}

/* Enable/disable the LRO patch-optimization feature. */
void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
				       u32 lro_patch_optimization_en)
{
	aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
			    rpo_lro_ptopt_en_msk,
			    rpo_lro_ptopt_en_shift,
			    lro_patch_optimization_en);
}

/* Set the per-queue LRO sessions limit. */
void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
			       u32 lro_qsessions_lim)
{
	aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
			    rpo_lro_qses_lmt_msk,
			    rpo_lro_qses_lmt_shift,
			    lro_qsessions_lim);
}

/* Set the total LRO descriptor limit. */
void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
{
	aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
			    rpo_lro_tot_dsc_lmt_msk,
			    rpo_lro_tot_dsc_lmt_shift,
			    lro_total_desc_lim);
}

/* Set the minimum payload of the first packet in an LRO aggregation. */
void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
				      u32 lro_min_pld_of_first_pkt)
{
	aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
			    rpo_lro_pkt_min_msk,
			    rpo_lro_pkt_min_shift,
			    lro_min_pld_of_first_pkt);
}

/* Set the LRO packet limit (full-register write). */
void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
{
	aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
}
/* Set the maximum number of descriptors for LRO session @lro.
 * Thirty-two 2-bit lro{L}_des_max fields are packed eight per register
 * (0x55A0..0x55AC); the per-index tables below select the register,
 * mask and shift.  @lro must be in 0..31 — no bounds check is done
 * here (caller contract).
 *
 * The lookup tables are read-only, so they are declared const to let
 * the compiler place them in .rodata and reject accidental writes.
 */
void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
					u32 lro_max_number_of_descriptors,
					u32 lro)
{
	/* Register address for bitfield lro{L}_des_max[1:0] */
	static const u32 rpo_lro_ldes_max_adr[32] = {
		0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
		0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
		0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
		0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
		0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
		0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
		0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU,
		0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU
	};
	/* Bitmask for bitfield lro{L}_des_max[1:0] */
	static const u32 rpo_lro_ldes_max_msk[32] = {
		0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
		0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
		0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
		0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
		0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
		0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
		0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
		0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U
	};
	/* Lower bit position of bitfield lro{L}_des_max[1:0] */
	static const u32 rpo_lro_ldes_max_shift[32] = {
		0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
		0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
		0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
		0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
	};

	aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro],
			    rpo_lro_ldes_max_msk[lro],
			    rpo_lro_ldes_max_shift[lro],
			    lro_max_number_of_descriptors);
}
/* Set the LRO time-base divider. */
void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
				   u32 lro_time_base_divider)
{
	aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
			    rpo_lro_tb_div_msk,
			    rpo_lro_tb_div_shift,
			    lro_time_base_divider);
}

/* Set the LRO inactive interval (units of the time base above). */
void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
				   u32 lro_inactive_interval)
{
	aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
			    rpo_lro_ina_ival_msk,
			    rpo_lro_ina_ival_shift,
			    lro_inactive_interval);
}

/* Set the LRO maximum coalescing interval. */
void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
					 u32 lro_max_coalescing_interval)
{
	aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
			    rpo_lro_max_ival_msk,
			    rpo_lro_max_ival_shift,
			    lro_max_coalescing_interval);
}

/* rx */

/* Set/clear the rx register reset-disable bit. */
void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
{
	aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
			    rx_reg_res_dsbl_msk,
			    rx_reg_res_dsbl_shift,
			    rx_reg_res_dis);
}
/* tdm */

/* Program the CPU id used for tx DCA channel @dca. */
void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
	aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
			    tdm_dcadcpuid_msk,
			    tdm_dcadcpuid_shift, cpuid);
}

/* Enable/disable large send offload (full-register write). */
void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
				   u32 large_send_offload_en)
{
	aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
}

/* Enable/disable tx DCA globally. */
void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
{
	aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
			    tdm_dca_en_shift, tx_dca_en);
}

/* Select the tx DCA mode. */
void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
{
	aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
			    tdm_dca_mode_shift, tx_dca_mode);
}

/* Enable/disable descriptor DCA for channel @dca. */
void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
{
	aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
			    tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
			    tx_desc_dca_en);
}

/* Enable/disable tx descriptor ring @descriptor. */
void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
			    tdm_descden_msk,
			    tdm_descden_shift,
			    tx_desc_en);
}

/* Return the hardware head pointer of tx descriptor ring @descriptor. */
u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
{
	return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
				  tdm_descdhd_msk, tdm_descdhd_shift);
}

/* Program the length of tx descriptor ring @descriptor. */
void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
			 u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
			    tdm_descdlen_msk,
			    tdm_descdlen_shift,
			    tx_desc_len);
}

/* Enable/disable the tx descriptor write-back interrupt. */
void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
				  u32 tx_desc_wr_wb_irq_en)
{
	aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
			    tdm_int_desc_wrb_en_msk,
			    tdm_int_desc_wrb_en_shift,
			    tx_desc_wr_wb_irq_en);
}

/* Set the write-back threshold for tx descriptor ring @descriptor. */
void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
				     u32 tx_desc_wr_wb_threshold,
				     u32 descriptor)
{
	aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
			    tdm_descdwrb_thresh_msk,
			    tdm_descdwrb_thresh_shift,
			    tx_desc_wr_wb_threshold);
}

/* Enable/disable TDM interrupt moderation. */
void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
			       u32 tdm_irq_moderation_en)
{
	aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
			    tdm_int_mod_en_msk,
			    tdm_int_mod_en_shift,
			    tdm_irq_moderation_en);
}
/* thm */

/* Set the TCP flags mask applied to the first packet of an LSO burst. */
void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
				       u32 lso_tcp_flag_of_first_pkt)
{
	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
			    thm_lso_tcp_flag_first_msk,
			    thm_lso_tcp_flag_first_shift,
			    lso_tcp_flag_of_first_pkt);
}

/* Set the TCP flags mask applied to the last packet of an LSO burst. */
void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
				      u32 lso_tcp_flag_of_last_pkt)
{
	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
			    thm_lso_tcp_flag_last_msk,
			    thm_lso_tcp_flag_last_shift,
			    lso_tcp_flag_of_last_pkt);
}

/* Set the TCP flags mask applied to middle packets of an LSO burst. */
void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
					u32 lso_tcp_flag_of_middle_pkt)
{
	aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
			    thm_lso_tcp_flag_mid_msk,
			    thm_lso_tcp_flag_mid_shift,
			    lso_tcp_flag_of_middle_pkt);
}
/* TPB: tx packet buffer */

/* Enable/disable the tx packet buffer. */
void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
{
	aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
			    tpb_tx_buf_en_shift, tx_buff_en);
}

/* Set the tx buffer high watermark for traffic class @buffer. */
void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
					 u32 tx_buff_hi_threshold_per_tc,
					 u32 buffer)
{
	aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
			    tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
			    tx_buff_hi_threshold_per_tc);
}

/* Set the tx buffer low watermark for traffic class @buffer. */
void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
					 u32 tx_buff_lo_threshold_per_tc,
					 u32 buffer)
{
	aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
			    tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
			    tx_buff_lo_threshold_per_tc);
}

/* Enable/disable tx DMA system loopback. */
void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
{
	aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
			    tpb_dma_sys_lbk_msk,
			    tpb_dma_sys_lbk_shift,
			    tx_dma_sys_lbk_en);
}

/* Set the tx packet buffer size for traffic class @buffer. */
void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
				     u32 tx_pkt_buff_size_per_tc, u32 buffer)
{
	aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
			    tpb_txbbuf_size_msk,
			    tpb_txbbuf_size_shift,
			    tx_pkt_buff_size_per_tc);
}

/* Enable/disable SCP insertion on the tx path. */
void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
{
	aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
			    tpb_tx_scp_ins_en_msk,
			    tpb_tx_scp_ins_en_shift,
			    tx_path_scp_ins_en);
}
/* TPO: tx packet offload */

/* Enable/disable IPv4 header checksum offload on transmit. */
void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
				       u32 ipv4header_crc_offload_en)
{
	aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
			    tpo_ipv4chk_en_msk,
			    tpo_ipv4chk_en_shift,
			    ipv4header_crc_offload_en);
}

/* Enable/disable TCP/UDP checksum offload on transmit. */
void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
				    u32 tcp_udp_crc_offload_en)
{
	aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
			    tpol4chk_en_msk,
			    tpol4chk_en_shift,
			    tcp_udp_crc_offload_en);
}

/* Enable/disable tx packet system loopback. */
void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
{
	aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
			    tpo_pkt_sys_lbk_msk,
			    tpo_pkt_sys_lbk_shift,
			    tx_pkt_sys_lbk_en);
}
/* TPS: tx packet scheduler */

/* Select the data arbitration mode between traffic classes. */
void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
				       u32 tx_pkt_shed_data_arb_mode)
{
	aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
			    tps_data_tc_arb_mode_msk,
			    tps_data_tc_arb_mode_shift,
			    tx_pkt_shed_data_arb_mode);
}

/* Set the current-time resolution for descriptor rate limiting. */
void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
						 u32 curr_time_res)
{
	aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
			    tps_desc_rate_ta_rst_msk,
			    tps_desc_rate_ta_rst_shift,
			    curr_time_res);
}

/* Set the descriptor rate limit. */
void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
				       u32 tx_pkt_shed_desc_rate_lim)
{
	aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
			    tps_desc_rate_lim_msk,
			    tps_desc_rate_lim_shift,
			    tx_pkt_shed_desc_rate_lim);
}

/* Select the descriptor arbitration mode between traffic classes. */
void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
					  u32 tx_pkt_shed_desc_tc_arb_mode)
{
	aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
			    tps_desc_tc_arb_mode_msk,
			    tps_desc_tc_arb_mode_shift,
			    tx_pkt_shed_desc_tc_arb_mode);
}

/* Set the descriptor max credit for traffic class @tc. */
void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
					    u32 tx_pkt_shed_desc_tc_max_credit,
					    u32 tc)
{
	aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
			    tps_desc_tctcredit_max_msk,
			    tps_desc_tctcredit_max_shift,
			    tx_pkt_shed_desc_tc_max_credit);
}

/* Set the descriptor arbitration weight for traffic class @tc. */
void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
					u32 tx_pkt_shed_desc_tc_weight, u32 tc)
{
	aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
			    tps_desc_tctweight_msk,
			    tps_desc_tctweight_shift,
			    tx_pkt_shed_desc_tc_weight);
}

/* Select the descriptor arbitration mode between virtual machines. */
void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
					  u32 tx_pkt_shed_desc_vm_arb_mode)
{
	aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
			    tps_desc_vm_arb_mode_msk,
			    tps_desc_vm_arb_mode_shift,
			    tx_pkt_shed_desc_vm_arb_mode);
}

/* Set the data max credit for traffic class @tc. */
void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
					    u32 tx_pkt_shed_tc_data_max_credit,
					    u32 tc)
{
	aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
			    tps_data_tctcredit_max_msk,
			    tps_data_tctcredit_max_shift,
			    tx_pkt_shed_tc_data_max_credit);
}

/* Set the data arbitration weight for traffic class @tc. */
void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
					u32 tx_pkt_shed_tc_data_weight, u32 tc)
{
	aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
			    tps_data_tctweight_msk,
			    tps_data_tctweight_shift,
			    tx_pkt_shed_tc_data_weight);
}
/* tx */

/* Set/clear the tx register reset-disable bit. */
void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
{
	aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
			    tx_reg_res_dsbl_msk,
			    tx_reg_res_dsbl_shift, tx_reg_res_dis);
}

/* msm */

/* Return the MSM indirect-access busy flag; callers poll this before
 * issuing the next indirect read/write.
 */
u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
				  msm_reg_access_busy_msk,
				  msm_reg_access_busy_shift);
}

/* Set the target register address for MSM indirect access. */
void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
					u32 reg_addr_for_indirect_addr)
{
	aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
			    msm_reg_addr_msk,
			    msm_reg_addr_shift,
			    reg_addr_for_indirect_addr);
}

/* Assert/deassert the MSM indirect read strobe. */
void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
{
	aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
			    msm_reg_rd_strobe_msk,
			    msm_reg_rd_strobe_shift,
			    reg_rd_strobe);
}

/* Return the data word latched by the last MSM indirect read. */
u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
{
	return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
}

/* Load the data word for the next MSM indirect write. */
void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
{
	aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
}

/* Assert/deassert the MSM indirect write strobe. */
void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
{
	aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
			    msm_reg_wr_strobe_msk,
			    msm_reg_wr_strobe_shift,
			    reg_wr_strobe);
}

/* pci */

/* Set/clear the PCI register reset-disable bit. */
void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
{
	aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
			    pci_reg_res_dsbl_msk,
			    pci_reg_res_dsbl_shift,
			    pci_reg_res_dis);
}

/* Write a value to microprocessor scratch-pad register @scratch_scp. */
void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
				 u32 scratch_scp)
{
	aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
			glb_cpu_scratch_scp);
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
* Atlantic registers.
*/
#ifndef HW_ATL_LLH_H
#define HW_ATL_LLH_H
#include <linux/types.h>
struct aq_hw_s;
/* global */
/* set global microprocessor semaphore */
void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
u32 semaphore);
/* get global microprocessor semaphore */
u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
/* set global register reset disable */
void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
/* set soft reset */
void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
/* get soft reset */
u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
/* stats */
u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter lsw */
u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter lsw */
u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter lsw */
u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter lsw */
u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good octet counter msw */
u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get rx dma good packet counter msw */
u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good octet counter msw */
u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
/* get tx dma good packet counter msw */
u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
/* get msm rx errors counter register */
u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx unicast frames counter register */
u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx multicast frames counter register */
u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast frames counter register */
u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm rx broadcast octets counter register 1 */
u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm rx unicast octets counter register 0 */
u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get rx dma statistics counter 7 */
u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
/* get msm tx errors counter register */
u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx unicast frames counter register */
u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast frames counter register */
u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast frames counter register */
u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
/* get msm tx multicast octets counter register 1 */
u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx broadcast octets counter register 1 */
u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm tx unicast octets counter register 0 */
u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
/* get global mif identification */
u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
/* interrupt */
/* set interrupt auto mask lsw */
void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
/* set interrupt mapping enable rx */
void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
/* set interrupt mapping enable tx */
void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
/* set interrupt mapping rx */
void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
/* set interrupt mapping tx */
void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
/* set interrupt mask clear lsw */
void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
/* set interrupt mask set lsw */
void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
/* set interrupt register reset disable */
void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
/* set interrupt status clear lsw */
void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
u32 irq_status_clearlsw);
/* get interrupt status lsw */
u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
/* get reset interrupt */
u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
/* rdm */
/* set cpu id */
void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set rx dca enable */
void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
/* set rx dca mode */
void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
/* set rx descriptor data buffer size */
void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
u32 rx_desc_data_buff_size,
u32 descriptor);
/* set rx descriptor dca enable */
void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
u32 dca);
/* set rx descriptor enable */
void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
u32 descriptor);
/* set rx descriptor header splitting */
void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
u32 rx_desc_head_splitting,
u32 descriptor);
/* get rx descriptor head pointer */
u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx descriptor length */
void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
u32 descriptor);
/* set rx descriptor write-back interrupt enable */
void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
u32 rx_desc_wr_wb_irq_en);
/* set rx header dca enable */
void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
u32 dca);
/* set rx payload dca enable */
void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
/* set rx descriptor header buffer size */
void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
u32 rx_desc_head_buff_size,
u32 descriptor);
/* set rx descriptor reset */
void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
u32 descriptor);
/* Set RDM Interrupt Moderation Enable */
void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
/* reg */
/* set general interrupt mapping register */
void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
/* get general interrupt status register */
u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
/* set interrupt global control register */
void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
/* set interrupt throttle register */
void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
/* set rx dma descriptor base address lsw */
void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_base_addrlsw,
u32 descriptor);
/* set rx dma descriptor base address msw */
void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_base_addrmsw,
u32 descriptor);
/* get rx dma descriptor status register */
u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set rx dma descriptor tail pointer register */
void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
u32 rx_dma_desc_tail_ptr,
u32 descriptor);
/* set rx filter multicast filter mask register */
void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
u32 rx_flr_mcst_flr_msk);
/* set rx filter multicast filter register */
void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
u32 filter);
/* set rx filter rss control register 1 */
void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
u32 rx_flr_rss_control1);
/* Set RX Filter Control Register 2 */
void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
/* Set RX Interrupt Moderation Control Register */
void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 rx_intr_moderation_ctl,
u32 queue);
/* set tx dma debug control */
void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
/* set tx dma descriptor base address lsw */
void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_base_addrlsw,
u32 descriptor);
/* set tx dma descriptor base address msw */
void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_base_addrmsw,
u32 descriptor);
/* set tx dma descriptor tail pointer register */
void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
u32 tx_dma_desc_tail_ptr,
u32 descriptor);
/* Set TX Interrupt Moderation Control Register */
void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 tx_intr_moderation_ctl,
u32 queue);
/* set global microprocessor scratch pad */
void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
u32 glb_cpu_scratch_scp, u32 scratch_scp);
/* rpb */
/* set dma system loopback */
void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
/* set rx traffic class mode */
void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
u32 rx_traf_class_mode);
/* set rx buffer enable */
void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
/* set rx buffer high threshold (per tc) */
void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_buff_hi_threshold_per_tc,
u32 buffer);
/* set rx buffer low threshold (per tc) */
void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_buff_lo_threshold_per_tc,
u32 buffer);
/* set rx flow control mode */
void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
/* set rx packet buffer size (per tc) */
void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 rx_pkt_buff_size_per_tc,
u32 buffer);
/* set rx xoff enable (per tc) */
void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
u32 buffer);
/* rpf */
/* set l2 broadcast count threshold */
void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
u32 l2broadcast_count_threshold);
/* set l2 broadcast enable */
void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
/* set l2 broadcast filter action */
void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
u32 l2broadcast_flr_act);
/* set l2 multicast filter enable */
void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
u32 filter);
/* set l2 promiscuous mode enable */
void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
u32 l2promiscuous_mode_en);
/* set l2 unicast filter action */
void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
u32 filter);
/* set l2 unicast filter enable */
void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
u32 filter);
/* set l2 unicast destination address lsw */
void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
u32 l2unicast_dest_addresslsw,
u32 filter);
/* set l2 unicast destination address msw */
void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
u32 l2unicast_dest_addressmsw,
u32 filter);
/* Set L2 Accept all Multicast packets */
void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
u32 l2_accept_all_mc_packets);
/* set user-priority tc mapping */
void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
u32 user_priority_tc_map, u32 tc);
/* set rss key address */
void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
/* set rss key write data */
void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
/* get rss key write enable */
u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss key write enable */
void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
/* set rss redirection table address */
void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
u32 rss_redir_tbl_addr);
/* set rss redirection table write data */
void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
u32 rss_redir_tbl_wr_data);
/* get rss redirection write enable */
u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
/* set rss redirection write enable */
void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
/* set tpo to rpf system loopback */
void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
u32 tpo_to_rpf_sys_lbk);
/* set vlan inner ethertype */
void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
/* set vlan outer ethertype */
void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
/* set vlan promiscuous mode enable */
void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
/* Set VLAN untagged action */
void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
/* Set VLAN accept untagged packets */
void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
u32 vlan_accept_untagged_packets);
/* Set VLAN filter enable */
void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
/* Set VLAN Filter Action */
void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
u32 filter);
/* Set VLAN ID Filter */
void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
/* set ethertype filter enable */
void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
/* set ethertype user-priority enable */
void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
u32 etht_user_priority_en, u32 filter);
/* set ethertype rx queue enable */
void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
u32 filter);
/* set ethertype rx queue */
void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
u32 filter);
/* set ethertype user-priority */
void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
u32 filter);
/* set ethertype management queue */
void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
u32 filter);
/* set ethertype filter action */
void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
u32 filter);
/* set ethertype filter */
void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
/* rpo */
/* set ipv4 header checksum offload enable */
void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en);
/* set rx descriptor vlan stripping */
void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
u32 rx_desc_vlan_stripping,
u32 descriptor);
/* set tcp/udp checksum offload enable */
void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 tcp_udp_crc_offload_en);
/* Set LRO Patch Optimization Enable. */
void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
u32 lro_patch_optimization_en);
/* Set Large Receive Offload Enable */
void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
/* Set LRO Q Sessions Limit */
void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
/* Set LRO Total Descriptor Limit */
void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
/* Set LRO Min Payload of First Packet */
void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
u32 lro_min_pld_of_first_pkt);
/* Set LRO Packet Limit */
void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
/* Set LRO Max Number of Descriptors */
void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
u32 lro_max_desc_num, u32 lro);
/* Set LRO Time Base Divider */
void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
u32 lro_time_base_divider);
/* Set LRO Inactive Interval */
void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
u32 lro_inactive_interval);
/* Set LRO Max Coalescing Interval */
void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
u32 lro_max_coalescing_interval);
/* rx */
/* set rx register reset disable */
void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
/* tdm */
/* set cpu id */
void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
/* set large send offload enable */
void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
u32 large_send_offload_en);
/* set tx descriptor enable */
void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
/* set tx dca enable */
void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
/* set tx dca mode */
void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
/* set tx descriptor dca enable */
void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
/* get tx descriptor head pointer */
u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
/* set tx descriptor length */
void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
u32 descriptor);
/* set tx descriptor write-back interrupt enable */
void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
u32 tx_desc_wr_wb_irq_en);
/* set tx descriptor write-back threshold */
void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
u32 tx_desc_wr_wb_threshold,
u32 descriptor);
/* Set TDM Interrupt Moderation Enable */
void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
u32 tdm_irq_moderation_en);
/* thm */
/* set lso tcp flag of first packet */
void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_first_pkt);
/* set lso tcp flag of last packet */
void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_last_pkt);
/* set lso tcp flag of middle packet */
void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
u32 lso_tcp_flag_of_middle_pkt);
/* tpb */
/* set tx buffer enable */
void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
/* set tx buffer high threshold (per tc) */
void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_hi_threshold_per_tc,
u32 buffer);
/* set tx buffer low threshold (per tc) */
void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_buff_lo_threshold_per_tc,
u32 buffer);
/* set tx dma system loopback enable */
void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
/* set tx packet buffer size (per tc) */
void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_buff_size_per_tc, u32 buffer);
/* set tx path pad insert enable */
void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
/* tpo */
/* set ipv4 header checksum offload enable */
void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en);
/* set tcp/udp checksum offload enable */
void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 tcp_udp_crc_offload_en);
/* set tx pkt system loopback enable */
void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
/* tps */
/* set tx packet scheduler data arbitration mode */
void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_data_arb_mode);
/* set tx packet scheduler descriptor rate current time reset */
void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
u32 curr_time_res);
/* set tx packet scheduler descriptor rate limit */
void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_rate_lim);
/* set tx packet scheduler descriptor tc arbitration mode */
void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_arb_mode);
/* set tx packet scheduler descriptor tc max credit */
void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_max_credit,
u32 tc);
/* set tx packet scheduler descriptor tc weight */
void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_weight,
u32 tc);
/* set tx packet scheduler descriptor vm arbitration mode */
void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_vm_arb_mode);
/* set tx packet scheduler tc data max credit */
void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_max_credit,
u32 tc);
/* set tx packet scheduler tc data weight */
void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc);
/* tx */
/* set tx register reset disable */
void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
/* msm */
/* get register access status */
u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
/* set register address for indirect address */
void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
u32 reg_addr_for_indirect_addr);
/* set register read strobe */
void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
/* get register read data */
u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
/* set register write data */
void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
/* set register write strobe */
void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
/* pci */
/* set pci register reset disable */
void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
#endif /* HW_ATL_LLH_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_llh_internal.h: Preprocessor definitions
* for Atlantic registers.
*/
#ifndef HW_ATL_LLH_INTERNAL_H
#define HW_ATL_LLH_INTERNAL_H
/* global microprocessor semaphore definitions
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
*/
#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4)
/* register address for bitfield rx dma good octet counter lsw [1f:0] */
#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
/* register address for bitfield rx dma good packet counter lsw [1f:0] */
#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
/* register address for bitfield tx dma good octet counter lsw [1f:0] */
#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
/* register address for bitfield tx dma good packet counter lsw [1f:0] */
#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
/* register address for bitfield rx dma good octet counter msw [3f:20] */
#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
/* register address for bitfield rx dma good packet counter msw [3f:20] */
#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
/* register address for bitfield tx dma good octet counter msw [3f:20] */
#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
/* register address for bitfield tx dma good packet counter msw [3f:20] */
#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
/* preprocessor definitions for msm rx errors counter register */
#define mac_msm_rx_errs_cnt_adr 0x00000120u
/* preprocessor definitions for msm rx unicast frames counter register */
#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
/* preprocessor definitions for msm rx multicast frames counter register */
#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
/* preprocessor definitions for msm rx broadcast frames counter register */
#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
/* preprocessor definitions for msm rx broadcast octets counter register 1 */
#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
/* preprocessor definitions for msm rx broadcast octets counter register 2 */
#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
/* preprocessor definitions for msm rx unicast octets counter register 0 */
#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
/* preprocessor definitions for rx dma statistics counter 7 */
#define rx_dma_stat_counter7_adr 0x00006818u
/* preprocessor definitions for msm tx unicast frames counter register */
#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
/* preprocessor definitions for msm tx multicast frames counter register */
#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
/* preprocessor definitions for global mif identification */
#define glb_mif_id_adr 0x0000001cu
/* register address for bitfield iamr_lsw[1f:0] */
#define itr_iamrlsw_adr 0x00002090
/* register address for bitfield rx dma drop packet counter [1f:0] */
#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
/* register address for bitfield imcr_lsw[1f:0] */
#define itr_imcrlsw_adr 0x00002070
/* register address for bitfield imsr_lsw[1f:0] */
#define itr_imsrlsw_adr 0x00002060
/* register address for bitfield itr_reg_res_dsbl */
#define itr_reg_res_dsbl_adr 0x00002300
/* bitmask for bitfield itr_reg_res_dsbl */
#define itr_reg_res_dsbl_msk 0x20000000
/* lower bit position of bitfield itr_reg_res_dsbl */
#define itr_reg_res_dsbl_shift 29
/* register address for bitfield iscr_lsw[1f:0] */
#define itr_iscrlsw_adr 0x00002050
/* register address for bitfield isr_lsw[1f:0] */
#define itr_isrlsw_adr 0x00002000
/* register address for bitfield itr_reset */
#define itr_res_adr 0x00002300
/* bitmask for bitfield itr_reset */
#define itr_res_msk 0x80000000
/* lower bit position of bitfield itr_reset */
#define itr_res_shift 31
/* register address for bitfield dca{d}_cpuid[7:0] */
#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
#define rdm_dcadcpuid_msk 0x000000ff
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
#define rdm_dcadcpuid_shift 0
/* rx dca_en bitfield definitions
 * preprocessor definitions for the bitfield "dca_en".
 * port="pif_rdm_dca_en_i"
 */
/* register address for bitfield dca_en */
#define rdm_dca_en_adr 0x00006180
/* bitmask for bitfield dca_en */
#define rdm_dca_en_msk 0x80000000
/* inverted bitmask for bitfield dca_en */
#define rdm_dca_en_mskn 0x7fffffff
/* lower bit position of bitfield dca_en */
#define rdm_dca_en_shift 31
/* width of bitfield dca_en */
#define rdm_dca_en_width 1
/* default value of bitfield dca_en */
#define rdm_dca_en_default 0x1
/* rx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
* port="pif_rdm_dca_mode_i[3:0]"
*/
/* register address for bitfield dca_mode[3:0] */
#define rdm_dca_mode_adr 0x00006180
/* bitmask for bitfield dca_mode[3:0] */
#define rdm_dca_mode_msk 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
#define rdm_dca_mode_mskn 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
#define rdm_dca_mode_shift 0
/* width of bitfield dca_mode[3:0] */
#define rdm_dca_mode_width 4
/* default value of bitfield dca_mode[3:0] */
#define rdm_dca_mode_default 0x0
/* rx desc{d}_data_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
* port="pif_rdm_desc0_data_size_i[4:0]"
*/
/* register address for bitfield desc{d}_data_size[4:0] */
#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_data_size[4:0] */
#define rdm_descddata_size_msk 0x0000001f
/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
#define rdm_descddata_size_mskn 0xffffffe0
/* lower bit position of bitfield desc{d}_data_size[4:0] */
#define rdm_descddata_size_shift 0
/* width of bitfield desc{d}_data_size[4:0] */
#define rdm_descddata_size_width 5
/* default value of bitfield desc{d}_data_size[4:0] */
#define rdm_descddata_size_default 0x0
/* rx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
* parameter: dca {d} | stride size 0x4 | range [0, 31]
* port="pif_rdm_dca_desc_en_i[0]"
*/
/* register address for bitfield dca{d}_desc_en */
#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
#define rdm_dcaddesc_en_msk 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
#define rdm_dcaddesc_en_mskn 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
#define rdm_dcaddesc_en_shift 31
/* width of bitfield dca{d}_desc_en */
#define rdm_dcaddesc_en_width 1
/* default value of bitfield dca{d}_desc_en */
#define rdm_dcaddesc_en_default 0x0
/* rx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
* port="pif_rdm_desc_en_i[0]"
*/
/* register address for bitfield desc{d}_en */
#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_en */
#define rdm_descden_msk 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
#define rdm_descden_mskn 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
#define rdm_descden_shift 31
/* width of bitfield desc{d}_en */
#define rdm_descden_width 1
/* default value of bitfield desc{d}_en */
#define rdm_descden_default 0x0
/* rx desc{d}_hdr_size[4:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
* port="pif_rdm_desc0_hdr_size_i[4:0]"
*/
/* register address for bitfield desc{d}_hdr_size[4:0] */
#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_size[4:0] */
#define rdm_descdhdr_size_msk 0x00001f00
/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
#define rdm_descdhdr_size_mskn 0xffffe0ff
/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
#define rdm_descdhdr_size_shift 8
/* width of bitfield desc{d}_hdr_size[4:0] */
#define rdm_descdhdr_size_width 5
/* default value of bitfield desc{d}_hdr_size[4:0] */
#define rdm_descdhdr_size_default 0x0
/* rx desc{d}_hdr_split bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hdr_split".
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
* port="pif_rdm_desc_hdr_split_i[0]"
*/
/* register address for bitfield desc{d}_hdr_split */
#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hdr_split */
#define rdm_descdhdr_split_msk 0x10000000
/* inverted bitmask for bitfield desc{d}_hdr_split */
#define rdm_descdhdr_split_mskn 0xefffffff
/* lower bit position of bitfield desc{d}_hdr_split */
#define rdm_descdhdr_split_shift 28
/* width of bitfield desc{d}_hdr_split */
#define rdm_descdhdr_split_width 1
/* default value of bitfield desc{d}_hdr_split */
#define rdm_descdhdr_split_default 0x0
/* rx desc{d}_hd[c:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
* port="rdm_pif_desc0_hd_o[12:0]"
*/
/* register address for bitfield desc{d}_hd[c:0] */
#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_hd[c:0] */
#define rdm_descdhd_msk 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
#define rdm_descdhd_mskn 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
#define rdm_descdhd_shift 0
/* width of bitfield desc{d}_hd[c:0] */
#define rdm_descdhd_width 13
/* rx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
* port="pif_rdm_desc0_len_i[9:0]"
*/
/* register address for bitfield desc{d}_len[9:0] */
#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_len[9:0] */
#define rdm_descdlen_msk 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
#define rdm_descdlen_mskn 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
#define rdm_descdlen_shift 3
/* width of bitfield desc{d}_len[9:0] */
#define rdm_descdlen_width 10
/* default value of bitfield desc{d}_len[9:0] */
#define rdm_descdlen_default 0x0
/* rx desc{d}_reset bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_reset".
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
* port="pif_rdm_q_pf_res_i[0]"
*/
/* register address for bitfield desc{d}_reset */
#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_reset */
#define rdm_descdreset_msk 0x02000000
/* inverted bitmask for bitfield desc{d}_reset */
#define rdm_descdreset_mskn 0xfdffffff
/* lower bit position of bitfield desc{d}_reset */
#define rdm_descdreset_shift 25
/* width of bitfield desc{d}_reset */
#define rdm_descdreset_width 1
/* default value of bitfield desc{d}_reset */
#define rdm_descdreset_default 0x0
/* rx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_rdm_int_desc_wrb_en_i"
*/
/* register address for bitfield int_desc_wrb_en */
#define rdm_int_desc_wrb_en_adr 0x00005a30
/* bitmask for bitfield int_desc_wrb_en */
#define rdm_int_desc_wrb_en_msk 0x00000004
/* inverted bitmask for bitfield int_desc_wrb_en */
#define rdm_int_desc_wrb_en_mskn 0xfffffffb
/* lower bit position of bitfield int_desc_wrb_en */
#define rdm_int_desc_wrb_en_shift 2
/* width of bitfield int_desc_wrb_en */
#define rdm_int_desc_wrb_en_width 1
/* default value of bitfield int_desc_wrb_en */
#define rdm_int_desc_wrb_en_default 0x0
/* rx dca{d}_hdr_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_hdr_en".
* parameter: dca {d} | stride size 0x4 | range [0, 31]
* port="pif_rdm_dca_hdr_en_i[0]"
*/
/* register address for bitfield dca{d}_hdr_en */
#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_hdr_en */
#define rdm_dcadhdr_en_msk 0x40000000
/* inverted bitmask for bitfield dca{d}_hdr_en */
#define rdm_dcadhdr_en_mskn 0xbfffffff
/* lower bit position of bitfield dca{d}_hdr_en */
#define rdm_dcadhdr_en_shift 30
/* width of bitfield dca{d}_hdr_en */
#define rdm_dcadhdr_en_width 1
/* default value of bitfield dca{d}_hdr_en */
#define rdm_dcadhdr_en_default 0x0
/* rx dca{d}_pay_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_pay_en".
* parameter: dca {d} | stride size 0x4 | range [0, 31]
* port="pif_rdm_dca_pay_en_i[0]"
*/
/* register address for bitfield dca{d}_pay_en */
#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_pay_en */
#define rdm_dcadpay_en_msk 0x20000000
/* inverted bitmask for bitfield dca{d}_pay_en */
#define rdm_dcadpay_en_mskn 0xdfffffff
/* lower bit position of bitfield dca{d}_pay_en */
#define rdm_dcadpay_en_shift 29
/* width of bitfield dca{d}_pay_en */
#define rdm_dcadpay_en_width 1
/* default value of bitfield dca{d}_pay_en */
#define rdm_dcadpay_en_default 0x0
/* RX rdm_int_rim_en Bitfield Definitions
* Preprocessor definitions for the bitfield "rdm_int_rim_en".
* PORT="pif_rdm_int_rim_en_i"
*/
/* Register address for bitfield rdm_int_rim_en */
#define rdm_int_rim_en_adr 0x00005A30
/* Bitmask for bitfield rdm_int_rim_en */
#define rdm_int_rim_en_msk 0x00000008
/* Inverted bitmask for bitfield rdm_int_rim_en */
#define rdm_int_rim_en_mskn 0xFFFFFFF7
/* Lower bit position of bitfield rdm_int_rim_en */
#define rdm_int_rim_en_shift 3
/* Width of bitfield rdm_int_rim_en */
#define rdm_int_rim_en_width 1
/* Default value of bitfield rdm_int_rim_en */
#define rdm_int_rim_en_default 0x0
/* general interrupt mapping register definitions
* preprocessor definitions for general interrupt mapping register
* base address: 0x00002180
* parameter: regidx {f} | stride size 0x4 | range [0, 3]
*/
#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
/* general interrupt status register definitions
* preprocessor definitions for general interrupt status register
 * address: 0x000021A4
*/
#define gen_intr_stat_adr 0x000021A4U
/* interrupt global control register definitions
* preprocessor definitions for interrupt global control register
* address: 0x00002300
*/
#define intr_glb_ctl_adr 0x00002300u
/* interrupt throttle register definitions
* preprocessor definitions for interrupt throttle register
* base address: 0x00002800
* parameter: throttle {t} | stride size 0x4 | range [0, 31]
*/
#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
/* rx dma descriptor base address lsw definitions
* preprocessor definitions for rx dma descriptor base address lsw
* base address: 0x00005b00
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
#define rx_dma_desc_base_addrlsw_adr(descriptor) \
(0x00005b00u + (descriptor) * 0x20)
/* rx dma descriptor base address msw definitions
* preprocessor definitions for rx dma descriptor base address msw
* base address: 0x00005b04
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
#define rx_dma_desc_base_addrmsw_adr(descriptor) \
(0x00005b04u + (descriptor) * 0x20)
/* rx dma descriptor status register definitions
* preprocessor definitions for rx dma descriptor status register
* base address: 0x00005b14
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
/* rx dma descriptor tail pointer register definitions
* preprocessor definitions for rx dma descriptor tail pointer register
* base address: 0x00005b10
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
*/
#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
/* rx interrupt moderation control register definitions
* Preprocessor definitions for RX Interrupt Moderation Control Register
* Base Address: 0x00005A40
* Parameter: RIM {R} | stride size 0x4 | range [0, 31]
*/
#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
/* rx filter multicast filter mask register definitions
* preprocessor definitions for rx filter multicast filter mask register
* address: 0x00005270
*/
#define rx_flr_mcst_flr_msk_adr 0x00005270u
/* rx filter multicast filter register definitions
* preprocessor definitions for rx filter multicast filter register
* base address: 0x00005250
* parameter: filter {f} | stride size 0x4 | range [0, 7]
*/
#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
/* RX Filter RSS Control Register 1 Definitions
* Preprocessor definitions for RX Filter RSS Control Register 1
* Address: 0x000054C0
*/
#define rx_flr_rss_control1_adr 0x000054C0u
/* RX Filter Control Register 2 Definitions
* Preprocessor definitions for RX Filter Control Register 2
* Address: 0x00005104
*/
#define rx_flr_control2_adr 0x00005104u
/* tx tx dma debug control [1f:0] bitfield definitions
* preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
* port="pif_tdm_debug_cntl_i[31:0]"
*/
/* register address for bitfield tx dma debug control [1f:0] */
#define tdm_tx_dma_debug_ctl_adr 0x00008920
/* bitmask for bitfield tx dma debug control [1f:0] */
#define tdm_tx_dma_debug_ctl_msk 0xffffffff
/* inverted bitmask for bitfield tx dma debug control [1f:0] */
#define tdm_tx_dma_debug_ctl_mskn 0x00000000
/* lower bit position of bitfield tx dma debug control [1f:0] */
#define tdm_tx_dma_debug_ctl_shift 0
/* width of bitfield tx dma debug control [1f:0] */
#define tdm_tx_dma_debug_ctl_width 32
/* default value of bitfield tx dma debug control [1f:0] */
#define tdm_tx_dma_debug_ctl_default 0x0
/* tx dma descriptor base address lsw definitions
* preprocessor definitions for tx dma descriptor base address lsw
* base address: 0x00007c00
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
#define tx_dma_desc_base_addrlsw_adr(descriptor) \
(0x00007c00u + (descriptor) * 0x40)
/* tx dma descriptor tail pointer register definitions
* preprocessor definitions for tx dma descriptor tail pointer register
* base address: 0x00007c10
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
*/
#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
/* rx dma_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "dma_sys_loopback".
* port="pif_rpb_dma_sys_lbk_i"
*/
/* register address for bitfield dma_sys_loopback */
#define rpb_dma_sys_lbk_adr 0x00005000
/* bitmask for bitfield dma_sys_loopback */
#define rpb_dma_sys_lbk_msk 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
#define rpb_dma_sys_lbk_mskn 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
#define rpb_dma_sys_lbk_shift 6
/* width of bitfield dma_sys_loopback */
#define rpb_dma_sys_lbk_width 1
/* default value of bitfield dma_sys_loopback */
#define rpb_dma_sys_lbk_default 0x0
/* rx rx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "rx_tc_mode".
* port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
*/
/* register address for bitfield rx_tc_mode */
#define rpb_rpf_rx_tc_mode_adr 0x00005700
/* bitmask for bitfield rx_tc_mode */
#define rpb_rpf_rx_tc_mode_msk 0x00000100
/* inverted bitmask for bitfield rx_tc_mode */
#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
/* lower bit position of bitfield rx_tc_mode */
#define rpb_rpf_rx_tc_mode_shift 8
/* width of bitfield rx_tc_mode */
#define rpb_rpf_rx_tc_mode_width 1
/* default value of bitfield rx_tc_mode */
#define rpb_rpf_rx_tc_mode_default 0x0
/* rx rx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "rx_buf_en".
* port="pif_rpb_rx_buf_en_i"
*/
/* register address for bitfield rx_buf_en */
#define rpb_rx_buf_en_adr 0x00005700
/* bitmask for bitfield rx_buf_en */
#define rpb_rx_buf_en_msk 0x00000001
/* inverted bitmask for bitfield rx_buf_en */
#define rpb_rx_buf_en_mskn 0xfffffffe
/* lower bit position of bitfield rx_buf_en */
#define rpb_rx_buf_en_shift 0
/* width of bitfield rx_buf_en */
#define rpb_rx_buf_en_width 1
/* default value of bitfield rx_buf_en */
#define rpb_rx_buf_en_default 0x0
/* rx rx{b}_hi_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
* port="pif_rpb_rx0_hi_thresh_i[13:0]"
*/
/* register address for bitfield rx{b}_hi_thresh[d:0] */
#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
#define rpb_rxbhi_thresh_msk 0x3fff0000
/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
#define rpb_rxbhi_thresh_mskn 0xc000ffff
/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
#define rpb_rxbhi_thresh_shift 16
/* width of bitfield rx{b}_hi_thresh[d:0] */
#define rpb_rxbhi_thresh_width 14
/* default value of bitfield rx{b}_hi_thresh[d:0] */
#define rpb_rxbhi_thresh_default 0x0
/* rx rx{b}_lo_thresh[d:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
* port="pif_rpb_rx0_lo_thresh_i[13:0]"
*/
/* register address for bitfield rx{b}_lo_thresh[d:0] */
#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
#define rpb_rxblo_thresh_msk 0x00003fff
/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
#define rpb_rxblo_thresh_mskn 0xffffc000
/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
#define rpb_rxblo_thresh_shift 0
/* width of bitfield rx{b}_lo_thresh[d:0] */
#define rpb_rxblo_thresh_width 14
/* default value of bitfield rx{b}_lo_thresh[d:0] */
#define rpb_rxblo_thresh_default 0x0
/* rx rx_fc_mode[1:0] bitfield definitions
* preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
* port="pif_rpb_rx_fc_mode_i[1:0]"
*/
/* register address for bitfield rx_fc_mode[1:0] */
#define rpb_rx_fc_mode_adr 0x00005700
/* bitmask for bitfield rx_fc_mode[1:0] */
#define rpb_rx_fc_mode_msk 0x00000030
/* inverted bitmask for bitfield rx_fc_mode[1:0] */
#define rpb_rx_fc_mode_mskn 0xffffffcf
/* lower bit position of bitfield rx_fc_mode[1:0] */
#define rpb_rx_fc_mode_shift 4
/* width of bitfield rx_fc_mode[1:0] */
#define rpb_rx_fc_mode_width 2
/* default value of bitfield rx_fc_mode[1:0] */
#define rpb_rx_fc_mode_default 0x0
/* rx rx{b}_buf_size[8:0] bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
* port="pif_rpb_rx0_buf_size_i[8:0]"
*/
/* register address for bitfield rx{b}_buf_size[8:0] */
#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_buf_size[8:0] */
#define rpb_rxbbuf_size_msk 0x000001ff
/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
#define rpb_rxbbuf_size_mskn 0xfffffe00
/* lower bit position of bitfield rx{b}_buf_size[8:0] */
#define rpb_rxbbuf_size_shift 0
/* width of bitfield rx{b}_buf_size[8:0] */
#define rpb_rxbbuf_size_width 9
/* default value of bitfield rx{b}_buf_size[8:0] */
#define rpb_rxbbuf_size_default 0x0
/* rx rx{b}_xoff_en bitfield definitions
* preprocessor definitions for the bitfield "rx{b}_xoff_en".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
* port="pif_rpb_rx_xoff_en_i[0]"
*/
/* register address for bitfield rx{b}_xoff_en */
#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
/* bitmask for bitfield rx{b}_xoff_en */
#define rpb_rxbxoff_en_msk 0x80000000
/* inverted bitmask for bitfield rx{b}_xoff_en */
#define rpb_rxbxoff_en_mskn 0x7fffffff
/* lower bit position of bitfield rx{b}_xoff_en */
#define rpb_rxbxoff_en_shift 31
/* width of bitfield rx{b}_xoff_en */
#define rpb_rxbxoff_en_width 1
/* default value of bitfield rx{b}_xoff_en */
#define rpb_rxbxoff_en_default 0x0
/* rx l2_bc_thresh[f:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
* port="pif_rpf_l2_bc_thresh_i[15:0]"
*/
/* register address for bitfield l2_bc_thresh[f:0] */
#define rpfl2bc_thresh_adr 0x00005100
/* bitmask for bitfield l2_bc_thresh[f:0] */
#define rpfl2bc_thresh_msk 0xffff0000
/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
#define rpfl2bc_thresh_mskn 0x0000ffff
/* lower bit position of bitfield l2_bc_thresh[f:0] */
#define rpfl2bc_thresh_shift 16
/* width of bitfield l2_bc_thresh[f:0] */
#define rpfl2bc_thresh_width 16
/* default value of bitfield l2_bc_thresh[f:0] */
#define rpfl2bc_thresh_default 0x0
/* rx l2_bc_en bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_en".
* port="pif_rpf_l2_bc_en_i"
*/
/* register address for bitfield l2_bc_en */
#define rpfl2bc_en_adr 0x00005100
/* bitmask for bitfield l2_bc_en */
#define rpfl2bc_en_msk 0x00000001
/* inverted bitmask for bitfield l2_bc_en */
#define rpfl2bc_en_mskn 0xfffffffe
/* lower bit position of bitfield l2_bc_en */
#define rpfl2bc_en_shift 0
/* width of bitfield l2_bc_en */
#define rpfl2bc_en_width 1
/* default value of bitfield l2_bc_en */
#define rpfl2bc_en_default 0x0
/* rx l2_bc_act[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_bc_act[2:0]".
* port="pif_rpf_l2_bc_act_i[2:0]"
*/
/* register address for bitfield l2_bc_act[2:0] */
#define rpfl2bc_act_adr 0x00005100
/* bitmask for bitfield l2_bc_act[2:0] */
#define rpfl2bc_act_msk 0x00007000
/* inverted bitmask for bitfield l2_bc_act[2:0] */
#define rpfl2bc_act_mskn 0xffff8fff
/* lower bit position of bitfield l2_bc_act[2:0] */
#define rpfl2bc_act_shift 12
/* width of bitfield l2_bc_act[2:0] */
#define rpfl2bc_act_width 3
/* default value of bitfield l2_bc_act[2:0] */
#define rpfl2bc_act_default 0x0
/* rx l2_mc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_mc_en{f}".
* parameter: filter {f} | stride size 0x4 | range [0, 7]
* port="pif_rpf_l2_mc_en_i[0]"
*/
/* register address for bitfield l2_mc_en{f} */
#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
/* bitmask for bitfield l2_mc_en{f} */
#define rpfl2mc_enf_msk 0x80000000
/* inverted bitmask for bitfield l2_mc_en{f} */
#define rpfl2mc_enf_mskn 0x7fffffff
/* lower bit position of bitfield l2_mc_en{f} */
#define rpfl2mc_enf_shift 31
/* width of bitfield l2_mc_en{f} */
#define rpfl2mc_enf_width 1
/* default value of bitfield l2_mc_en{f} */
#define rpfl2mc_enf_default 0x0
/* rx l2_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "l2_promis_mode".
* port="pif_rpf_l2_promis_mode_i"
*/
/* register address for bitfield l2_promis_mode */
#define rpfl2promis_mode_adr 0x00005100
/* bitmask for bitfield l2_promis_mode */
#define rpfl2promis_mode_msk 0x00000008
/* inverted bitmask for bitfield l2_promis_mode */
#define rpfl2promis_mode_mskn 0xfffffff7
/* lower bit position of bitfield l2_promis_mode */
#define rpfl2promis_mode_shift 3
/* width of bitfield l2_promis_mode */
#define rpfl2promis_mode_width 1
/* default value of bitfield l2_promis_mode */
#define rpfl2promis_mode_default 0x0
/* rx l2_uc_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
* parameter: filter {f} | stride size 0x8 | range [0, 37]
* port="pif_rpf_l2_uc_act0_i[2:0]"
*/
/* register address for bitfield l2_uc_act{f}[2:0] */
#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_act{f}[2:0] */
#define rpfl2uc_actf_msk 0x00070000
/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
#define rpfl2uc_actf_mskn 0xfff8ffff
/* lower bit position of bitfield l2_uc_act{f}[2:0] */
#define rpfl2uc_actf_shift 16
/* width of bitfield l2_uc_act{f}[2:0] */
#define rpfl2uc_actf_width 3
/* default value of bitfield l2_uc_act{f}[2:0] */
#define rpfl2uc_actf_default 0x0
/* rx l2_uc_en{f} bitfield definitions
* preprocessor definitions for the bitfield "l2_uc_en{f}".
* parameter: filter {f} | stride size 0x8 | range [0, 37]
* port="pif_rpf_l2_uc_en_i[0]"
*/
/* register address for bitfield l2_uc_en{f} */
#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_en{f} */
#define rpfl2uc_enf_msk 0x80000000
/* inverted bitmask for bitfield l2_uc_en{f} */
#define rpfl2uc_enf_mskn 0x7fffffff
/* lower bit position of bitfield l2_uc_en{f} */
#define rpfl2uc_enf_shift 31
/* width of bitfield l2_uc_en{f} */
#define rpfl2uc_enf_width 1
/* default value of bitfield l2_uc_en{f} */
#define rpfl2uc_enf_default 0x0
/* rx l2_uc_da{f} unicast DA filter address registers
 * NOTE(review): this group has no header comment in the original, unlike
 * every other group in this map. From the masks below, the 48-bit filter
 * address is split across two registers: bits [31:0] in the LSW register
 * and bits [47:32] in the low 16 bits of the MSW register — presumably a
 * MAC destination address; confirm against the datasheet.
 * parameter: filter {f} | stride size 0x8 | range [0, 37]
 */
/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
/* register address for bitfield l2_uc_da{f}_msw[f:0] */
#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
#define rpfl2uc_dafmsw_msk 0x0000ffff
/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
#define rpfl2uc_dafmsw_shift 0
/* rx l2_mc_accept_all bitfield definitions
* Preprocessor definitions for the bitfield "l2_mc_accept_all".
* PORT="pif_rpf_l2_mc_all_accept_i"
*/
/* Register address for bitfield l2_mc_accept_all */
#define rpfl2mc_accept_all_adr 0x00005270
/* Bitmask for bitfield l2_mc_accept_all */
#define rpfl2mc_accept_all_msk 0x00004000
/* Inverted bitmask for bitfield l2_mc_accept_all */
#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
/* Lower bit position of bitfield l2_mc_accept_all */
#define rpfl2mc_accept_all_shift 14
/* Width of bitfield l2_mc_accept_all */
#define rpfl2mc_accept_all_width 1
/* Default value of bitfield l2_mc_accept_all */
#define rpfl2mc_accept_all_default 0x0
/* width of bitfield rx_tc_up{t}[2:0] */
#define rpf_rpb_rx_tc_upt_width 3
/* default value of bitfield rx_tc_up{t}[2:0] */
#define rpf_rpb_rx_tc_upt_default 0x0
/* rx rss_key_addr[4:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_addr[4:0]".
* port="pif_rpf_rss_key_addr_i[4:0]"
*/
/* register address for bitfield rss_key_addr[4:0] */
#define rpf_rss_key_addr_adr 0x000054d0
/* bitmask for bitfield rss_key_addr[4:0] */
#define rpf_rss_key_addr_msk 0x0000001f
/* inverted bitmask for bitfield rss_key_addr[4:0] */
#define rpf_rss_key_addr_mskn 0xffffffe0
/* lower bit position of bitfield rss_key_addr[4:0] */
#define rpf_rss_key_addr_shift 0
/* width of bitfield rss_key_addr[4:0] */
#define rpf_rss_key_addr_width 5
/* default value of bitfield rss_key_addr[4:0] */
#define rpf_rss_key_addr_default 0x0
/* rx rss_key_wr_data[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
* port="pif_rpf_rss_key_wr_data_i[31:0]"
*/
/* register address for bitfield rss_key_wr_data[1f:0] */
#define rpf_rss_key_wr_data_adr 0x000054d4
/* bitmask for bitfield rss_key_wr_data[1f:0] */
#define rpf_rss_key_wr_data_msk 0xffffffff
/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
#define rpf_rss_key_wr_data_mskn 0x00000000
/* lower bit position of bitfield rss_key_wr_data[1f:0] */
#define rpf_rss_key_wr_data_shift 0
/* width of bitfield rss_key_wr_data[1f:0] */
#define rpf_rss_key_wr_data_width 32
/* default value of bitfield rss_key_wr_data[1f:0] */
#define rpf_rss_key_wr_data_default 0x0
/* rx rss_key_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_key_wr_en_i".
* port="pif_rpf_rss_key_wr_en_i"
*/
/* register address for bitfield rss_key_wr_en_i */
#define rpf_rss_key_wr_eni_adr 0x000054d0
/* bitmask for bitfield rss_key_wr_en_i */
#define rpf_rss_key_wr_eni_msk 0x00000020
/* inverted bitmask for bitfield rss_key_wr_en_i */
#define rpf_rss_key_wr_eni_mskn 0xffffffdf
/* lower bit position of bitfield rss_key_wr_en_i */
#define rpf_rss_key_wr_eni_shift 5
/* width of bitfield rss_key_wr_en_i */
#define rpf_rss_key_wr_eni_width 1
/* default value of bitfield rss_key_wr_en_i */
#define rpf_rss_key_wr_eni_default 0x0
/* rx rss_redir_addr[3:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
* port="pif_rpf_rss_redir_addr_i[3:0]"
*/
/* register address for bitfield rss_redir_addr[3:0] */
#define rpf_rss_redir_addr_adr 0x000054e0
/* bitmask for bitfield rss_redir_addr[3:0] */
#define rpf_rss_redir_addr_msk 0x0000000f
/* inverted bitmask for bitfield rss_redir_addr[3:0] */
#define rpf_rss_redir_addr_mskn 0xfffffff0
/* lower bit position of bitfield rss_redir_addr[3:0] */
#define rpf_rss_redir_addr_shift 0
/* width of bitfield rss_redir_addr[3:0] */
#define rpf_rss_redir_addr_width 4
/* default value of bitfield rss_redir_addr[3:0] */
#define rpf_rss_redir_addr_default 0x0
/* rx rss_redir_wr_data[f:0] bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
* port="pif_rpf_rss_redir_wr_data_i[15:0]"
*/
/* register address for bitfield rss_redir_wr_data[f:0] */
#define rpf_rss_redir_wr_data_adr 0x000054e4
/* bitmask for bitfield rss_redir_wr_data[f:0] */
#define rpf_rss_redir_wr_data_msk 0x0000ffff
/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
#define rpf_rss_redir_wr_data_mskn 0xffff0000
/* lower bit position of bitfield rss_redir_wr_data[f:0] */
#define rpf_rss_redir_wr_data_shift 0
/* width of bitfield rss_redir_wr_data[f:0] */
#define rpf_rss_redir_wr_data_width 16
/* default value of bitfield rss_redir_wr_data[f:0] */
#define rpf_rss_redir_wr_data_default 0x0
/* rx rss_redir_wr_en_i bitfield definitions
* preprocessor definitions for the bitfield "rss_redir_wr_en_i".
* port="pif_rpf_rss_redir_wr_en_i"
*/
/* register address for bitfield rss_redir_wr_en_i */
#define rpf_rss_redir_wr_eni_adr 0x000054e0
/* bitmask for bitfield rss_redir_wr_en_i */
#define rpf_rss_redir_wr_eni_msk 0x00000010
/* inverted bitmask for bitfield rss_redir_wr_en_i */
#define rpf_rss_redir_wr_eni_mskn 0xffffffef
/* lower bit position of bitfield rss_redir_wr_en_i */
#define rpf_rss_redir_wr_eni_shift 4
/* width of bitfield rss_redir_wr_en_i */
#define rpf_rss_redir_wr_eni_width 1
/* default value of bitfield rss_redir_wr_en_i */
#define rpf_rss_redir_wr_eni_default 0x0
/* rx tpo_rpf_sys_loopback bitfield definitions
* preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
* port="pif_rpf_tpo_pkt_sys_lbk_i"
*/
/* register address for bitfield tpo_rpf_sys_loopback */
#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
/* bitmask for bitfield tpo_rpf_sys_loopback */
#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
/* lower bit position of bitfield tpo_rpf_sys_loopback */
#define rpf_tpo_rpf_sys_lbk_shift 8
/* width of bitfield tpo_rpf_sys_loopback */
#define rpf_tpo_rpf_sys_lbk_width 1
/* default value of bitfield tpo_rpf_sys_loopback */
#define rpf_tpo_rpf_sys_lbk_default 0x0
/* rx vl_inner_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
* port="pif_rpf_vl_inner_tpid_i[15:0]"
*/
/* register address for bitfield vl_inner_tpid[f:0] */
#define rpf_vl_inner_tpid_adr 0x00005284
/* bitmask for bitfield vl_inner_tpid[f:0] */
#define rpf_vl_inner_tpid_msk 0x0000ffff
/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
#define rpf_vl_inner_tpid_mskn 0xffff0000
/* lower bit position of bitfield vl_inner_tpid[f:0] */
#define rpf_vl_inner_tpid_shift 0
/* width of bitfield vl_inner_tpid[f:0] */
#define rpf_vl_inner_tpid_width 16
/* default value of bitfield vl_inner_tpid[f:0] */
#define rpf_vl_inner_tpid_default 0x8100
/* rx vl_outer_tpid[f:0] bitfield definitions
* preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
* port="pif_rpf_vl_outer_tpid_i[15:0]"
*/
/* register address for bitfield vl_outer_tpid[f:0] */
#define rpf_vl_outer_tpid_adr 0x00005284
/* bitmask for bitfield vl_outer_tpid[f:0] */
#define rpf_vl_outer_tpid_msk 0xffff0000
/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
#define rpf_vl_outer_tpid_mskn 0x0000ffff
/* lower bit position of bitfield vl_outer_tpid[f:0] */
#define rpf_vl_outer_tpid_shift 16
/* width of bitfield vl_outer_tpid[f:0] */
#define rpf_vl_outer_tpid_width 16
/* default value of bitfield vl_outer_tpid[f:0] */
#define rpf_vl_outer_tpid_default 0x88a8
/* rx vl_promis_mode bitfield definitions
* preprocessor definitions for the bitfield "vl_promis_mode".
* port="pif_rpf_vl_promis_mode_i"
*/
/* register address for bitfield vl_promis_mode */
#define rpf_vl_promis_mode_adr 0x00005280
/* bitmask for bitfield vl_promis_mode */
#define rpf_vl_promis_mode_msk 0x00000002
/* inverted bitmask for bitfield vl_promis_mode */
#define rpf_vl_promis_mode_mskn 0xfffffffd
/* lower bit position of bitfield vl_promis_mode */
#define rpf_vl_promis_mode_shift 1
/* width of bitfield vl_promis_mode */
#define rpf_vl_promis_mode_width 1
/* default value of bitfield vl_promis_mode */
#define rpf_vl_promis_mode_default 0x0
/* RX vl_accept_untagged_mode Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
* PORT="pif_rpf_vl_accept_untagged_i"
*/
/* Register address for bitfield vl_accept_untagged_mode */
#define rpf_vl_accept_untagged_mode_adr 0x00005280
/* Bitmask for bitfield vl_accept_untagged_mode */
#define rpf_vl_accept_untagged_mode_msk 0x00000004
/* Inverted bitmask for bitfield vl_accept_untagged_mode */
#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
/* Lower bit position of bitfield vl_accept_untagged_mode */
#define rpf_vl_accept_untagged_mode_shift 2
/* Width of bitfield vl_accept_untagged_mode */
#define rpf_vl_accept_untagged_mode_width 1
/* Default value of bitfield vl_accept_untagged_mode */
#define rpf_vl_accept_untagged_mode_default 0x0
/* RX vl_untagged_act[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
* PORT="pif_rpf_vl_untagged_act_i[2:0]"
*/
/* Register address for bitfield vl_untagged_act[2:0] */
#define rpf_vl_untagged_act_adr 0x00005280
/* Bitmask for bitfield vl_untagged_act[2:0] */
#define rpf_vl_untagged_act_msk 0x00000038
/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
/* Lower bit position of bitfield vl_untagged_act[2:0] */
#define rpf_vl_untagged_act_shift 3
/* Width of bitfield vl_untagged_act[2:0] */
#define rpf_vl_untagged_act_width 3
/* Default value of bitfield vl_untagged_act[2:0] */
#define rpf_vl_untagged_act_default 0x0
/* RX vl_en{F} Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_en{F}".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
* PORT="pif_rpf_vl_en_i[0]"
*/
/* Register address for bitfield vl_en{F} */
#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_en{F} */
#define rpf_vl_en_f_msk 0x80000000
/* Inverted bitmask for bitfield vl_en{F} */
#define rpf_vl_en_f_mskn 0x7FFFFFFF
/* Lower bit position of bitfield vl_en{F} */
#define rpf_vl_en_f_shift 31
/* Width of bitfield vl_en{F} */
#define rpf_vl_en_f_width 1
/* Default value of bitfield vl_en{F} */
#define rpf_vl_en_f_default 0x0
/* RX vl_act{F}[2:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
* PORT="pif_rpf_vl_act0_i[2:0]"
*/
/* Register address for bitfield vl_act{F}[2:0] */
#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_act{F}[2:0] */
#define rpf_vl_act_f_msk 0x00070000
/* Inverted bitmask for bitfield vl_act{F}[2:0] */
#define rpf_vl_act_f_mskn 0xFFF8FFFF
/* Lower bit position of bitfield vl_act{F}[2:0] */
#define rpf_vl_act_f_shift 16
/* Width of bitfield vl_act{F}[2:0] */
#define rpf_vl_act_f_width 3
/* Default value of bitfield vl_act{F}[2:0] */
#define rpf_vl_act_f_default 0x0
/* RX vl_id{F}[B:0] Bitfield Definitions
* Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
* PORT="pif_rpf_vl_id0_i[11:0]"
*/
/* Register address for bitfield vl_id{F}[B:0] */
#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
/* Bitmask for bitfield vl_id{F}[B:0] */
#define rpf_vl_id_f_msk 0x00000FFF
/* Inverted bitmask for bitfield vl_id{F}[B:0] */
#define rpf_vl_id_f_mskn 0xFFFFF000
/* Lower bit position of bitfield vl_id{F}[B:0] */
#define rpf_vl_id_f_shift 0
/* Width of bitfield vl_id{F}[B:0] */
#define rpf_vl_id_f_width 12
/* Default value of bitfield vl_id{F}[B:0] */
#define rpf_vl_id_f_default 0x0
/* RX et_en{F} Bitfield Definitions
 * Preprocessor definitions for the bitfield "et_en{F}".
 * Parameter: filter {F} | stride size 0x4 | range [0, 15]
 * PORT="pif_rpf_et_en_i[0]"
 */
/* Register address for bitfield et_en{F} */
#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
/* Bitmask for bitfield et_en{F} */
#define rpf_et_en_f_msk 0x80000000
/* Inverted bitmask for bitfield et_en{F} */
#define rpf_et_en_f_mskn 0x7FFFFFFF
/* Lower bit position of bitfield et_en{F} */
#define rpf_et_en_f_shift 31
/* Width of bitfield et_en{F} */
#define rpf_et_en_f_width 1
/* Default value of bitfield et_en{F} */
#define rpf_et_en_f_default 0x0
/* rx et_en{f} bitfield definitions
 * preprocessor definitions for the bitfield "et_en{f}".
 * parameter: filter {f} | stride size 0x4 | range [0, 15]
 * port="pif_rpf_et_en_i[0]"
 * NOTE(review): this group is an exact duplicate of rpf_et_en_f_* above
 * (same register address, mask, inverted mask, shift, width and default)
 * under a second naming convention. Consider consolidating to a single
 * set of names once all users of both spellings are identified.
 */
/* register address for bitfield et_en{f} */
#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_en{f} */
#define rpf_et_enf_msk 0x80000000
/* inverted bitmask for bitfield et_en{f} */
#define rpf_et_enf_mskn 0x7fffffff
/* lower bit position of bitfield et_en{f} */
#define rpf_et_enf_shift 31
/* width of bitfield et_en{f} */
#define rpf_et_enf_width 1
/* default value of bitfield et_en{f} */
#define rpf_et_enf_default 0x0
/* rx et_up{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}_en".
* parameter: filter {f} | stride size 0x4 | range [0, 15]
* port="pif_rpf_et_up_en_i[0]"
*/
/* register address for bitfield et_up{f}_en */
#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}_en */
#define rpf_et_upfen_msk 0x40000000
/* inverted bitmask for bitfield et_up{f}_en */
#define rpf_et_upfen_mskn 0xbfffffff
/* lower bit position of bitfield et_up{f}_en */
#define rpf_et_upfen_shift 30
/* width of bitfield et_up{f}_en */
#define rpf_et_upfen_width 1
/* default value of bitfield et_up{f}_en */
#define rpf_et_upfen_default 0x0
/* rx et_rxq{f}_en bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}_en".
* parameter: filter {f} | stride size 0x4 | range [0, 15]
* port="pif_rpf_et_rxq_en_i[0]"
*/
/* register address for bitfield et_rxq{f}_en */
#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}_en */
#define rpf_et_rxqfen_msk 0x20000000
/* inverted bitmask for bitfield et_rxq{f}_en */
#define rpf_et_rxqfen_mskn 0xdfffffff
/* lower bit position of bitfield et_rxq{f}_en */
#define rpf_et_rxqfen_shift 29
/* width of bitfield et_rxq{f}_en */
#define rpf_et_rxqfen_width 1
/* default value of bitfield et_rxq{f}_en */
#define rpf_et_rxqfen_default 0x0
/* rx et_up{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_up{f}[2:0]".
* parameter: filter {f} | stride size 0x4 | range [0, 15]
* port="pif_rpf_et_up0_i[2:0]"
*/
/* register address for bitfield et_up{f}[2:0] */
#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_up{f}[2:0] */
#define rpf_et_upf_msk 0x1c000000
/* inverted bitmask for bitfield et_up{f}[2:0] */
#define rpf_et_upf_mskn 0xe3ffffff
/* lower bit position of bitfield et_up{f}[2:0] */
#define rpf_et_upf_shift 26
/* width of bitfield et_up{f}[2:0] */
#define rpf_et_upf_width 3
/* default value of bitfield et_up{f}[2:0] */
#define rpf_et_upf_default 0x0
/* rx et_rxq{f}[4:0] bitfield definitions
* preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
* parameter: filter {f} | stride size 0x4 | range [0, 15]
* port="pif_rpf_et_rxq0_i[4:0]"
*/
/* register address for bitfield et_rxq{f}[4:0] */
#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_rxq{f}[4:0] */
#define rpf_et_rxqf_msk 0x01f00000
/* inverted bitmask for bitfield et_rxq{f}[4:0] */
#define rpf_et_rxqf_mskn 0xfe0fffff
/* lower bit position of bitfield et_rxq{f}[4:0] */
#define rpf_et_rxqf_shift 20
/* width of bitfield et_rxq{f}[4:0] */
#define rpf_et_rxqf_width 5
/* default value of bitfield et_rxq{f}[4:0] */
#define rpf_et_rxqf_default 0x0
/* rx et_mng_rxq{f} bitfield definitions
* preprocessor definitions for the bitfield "et_mng_rxq{f}".
* parameter: filter {f} | stride size 0x4 | range [0, 15]
* port="pif_rpf_et_mng_rxq_i[0]"
*/
/* register address for bitfield et_mng_rxq{f} */
#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_mng_rxq{f} */
#define rpf_et_mng_rxqf_msk 0x00080000
/* inverted bitmask for bitfield et_mng_rxq{f} */
#define rpf_et_mng_rxqf_mskn 0xfff7ffff
/* lower bit position of bitfield et_mng_rxq{f} */
#define rpf_et_mng_rxqf_shift 19
/* width of bitfield et_mng_rxq{f} */
#define rpf_et_mng_rxqf_width 1
/* default value of bitfield et_mng_rxq{f} */
#define rpf_et_mng_rxqf_default 0x0
/* rx et_act{f}[2:0] bitfield definitions
* preprocessor definitions for the bitfield "et_act{f}[2:0]".
* parameter: filter {f} | stride size 0x4 | range [0, 15]
* port="pif_rpf_et_act0_i[2:0]"
*/
/* register address for bitfield et_act{f}[2:0] */
#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_act{f}[2:0] */
#define rpf_et_actf_msk 0x00070000
/* inverted bitmask for bitfield et_act{f}[2:0] */
#define rpf_et_actf_mskn 0xfff8ffff
/* lower bit position of bitfield et_act{f}[2:0] */
#define rpf_et_actf_shift 16
/* width of bitfield et_act{f}[2:0] */
#define rpf_et_actf_width 3
/* default value of bitfield et_act{f}[2:0] */
#define rpf_et_actf_default 0x0
/* rx et_val{f}[f:0] bitfield definitions
* preprocessor definitions for the bitfield "et_val{f}[f:0]".
* parameter: filter {f} | stride size 0x4 | range [0, 15]
* port="pif_rpf_et_val0_i[15:0]"
*/
/* register address for bitfield et_val{f}[f:0] */
#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
/* bitmask for bitfield et_val{f}[f:0] */
#define rpf_et_valf_msk 0x0000ffff
/* inverted bitmask for bitfield et_val{f}[f:0] */
#define rpf_et_valf_mskn 0xffff0000
/* lower bit position of bitfield et_val{f}[f:0] */
#define rpf_et_valf_shift 0
/* width of bitfield et_val{f}[f:0] */
#define rpf_et_valf_width 16
/* default value of bitfield et_val{f}[f:0] */
#define rpf_et_valf_default 0x0
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_rpo_ipv4_chk_en_i"
*/
/* register address for bitfield ipv4_chk_en */
#define rpo_ipv4chk_en_adr 0x00005580
/* bitmask for bitfield ipv4_chk_en */
#define rpo_ipv4chk_en_msk 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
#define rpo_ipv4chk_en_mskn 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
#define rpo_ipv4chk_en_shift 1
/* width of bitfield ipv4_chk_en */
#define rpo_ipv4chk_en_width 1
/* default value of bitfield ipv4_chk_en */
#define rpo_ipv4chk_en_default 0x0
/* rx desc{d}_vl_strip bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_vl_strip".
* parameter: descriptor {d} | stride size 0x20 | range [0, 31]
* port="pif_rpo_desc_vl_strip_i[0]"
*/
/* register address for bitfield desc{d}_vl_strip */
#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
/* bitmask for bitfield desc{d}_vl_strip */
#define rpo_descdvl_strip_msk 0x20000000
/* inverted bitmask for bitfield desc{d}_vl_strip */
#define rpo_descdvl_strip_mskn 0xdfffffff
/* lower bit position of bitfield desc{d}_vl_strip */
#define rpo_descdvl_strip_shift 29
/* width of bitfield desc{d}_vl_strip */
#define rpo_descdvl_strip_width 1
/* default value of bitfield desc{d}_vl_strip */
#define rpo_descdvl_strip_default 0x0
/* rx l4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "l4_chk_en".
* port="pif_rpo_l4_chk_en_i"
*/
/* register address for bitfield l4_chk_en */
#define rpol4chk_en_adr 0x00005580
/* bitmask for bitfield l4_chk_en */
#define rpol4chk_en_msk 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
#define rpol4chk_en_mskn 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
#define rpol4chk_en_shift 0
/* width of bitfield l4_chk_en */
#define rpol4chk_en_width 1
/* default value of bitfield l4_chk_en */
#define rpol4chk_en_default 0x0
/* rx reg_res_dsbl bitfield definitions
 * preprocessor definitions for the bitfield "reg_res_dsbl".
 * port="pif_rx_reg_res_dsbl_i"
 */
/* register address for bitfield reg_res_dsbl */
#define rx_reg_res_dsbl_adr 0x00005000
/* bitmask for bitfield reg_res_dsbl */
#define rx_reg_res_dsbl_msk 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
#define rx_reg_res_dsbl_mskn 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
#define rx_reg_res_dsbl_shift 29
/* width of bitfield reg_res_dsbl */
#define rx_reg_res_dsbl_width 1
/* default value of bitfield reg_res_dsbl
 * NOTE: unlike almost every other field in this map, the reset default
 * is 1 — presumably "register reset disable" is asserted out of reset;
 * confirm against the datasheet.
 */
#define rx_reg_res_dsbl_default 0x1
/* tx dca{d}_cpuid[7:0] bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
* parameter: dca {d} | stride size 0x4 | range [0, 31]
* port="pif_tdm_dca0_cpuid_i[7:0]"
*/
/* register address for bitfield dca{d}_cpuid[7:0] */
#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
#define tdm_dcadcpuid_msk 0x000000ff
/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
#define tdm_dcadcpuid_mskn 0xffffff00
/* lower bit position of bitfield dca{d}_cpuid[7:0] */
#define tdm_dcadcpuid_shift 0
/* width of bitfield dca{d}_cpuid[7:0] */
#define tdm_dcadcpuid_width 8
/* default value of bitfield dca{d}_cpuid[7:0] */
#define tdm_dcadcpuid_default 0x0
/* tx lso_en[1f:0] bitfield definitions
* preprocessor definitions for the bitfield "lso_en[1f:0]".
* port="pif_tdm_lso_en_i[31:0]"
*/
/* register address for bitfield lso_en[1f:0] */
#define tdm_lso_en_adr 0x00007810
/* bitmask for bitfield lso_en[1f:0] */
#define tdm_lso_en_msk 0xffffffff
/* inverted bitmask for bitfield lso_en[1f:0] */
#define tdm_lso_en_mskn 0x00000000
/* lower bit position of bitfield lso_en[1f:0] */
#define tdm_lso_en_shift 0
/* width of bitfield lso_en[1f:0] */
#define tdm_lso_en_width 32
/* default value of bitfield lso_en[1f:0] */
#define tdm_lso_en_default 0x0
/* tx dca_en bitfield definitions
* preprocessor definitions for the bitfield "dca_en".
* port="pif_tdm_dca_en_i"
*/
/* register address for bitfield dca_en */
#define tdm_dca_en_adr 0x00008480
/* bitmask for bitfield dca_en */
#define tdm_dca_en_msk 0x80000000
/* inverted bitmask for bitfield dca_en */
#define tdm_dca_en_mskn 0x7fffffff
/* lower bit position of bitfield dca_en */
#define tdm_dca_en_shift 31
/* width of bitfield dca_en */
#define tdm_dca_en_width 1
/* default value of bitfield dca_en */
#define tdm_dca_en_default 0x1
/* tx dca_mode[3:0] bitfield definitions
* preprocessor definitions for the bitfield "dca_mode[3:0]".
* port="pif_tdm_dca_mode_i[3:0]"
*/
/* register address for bitfield dca_mode[3:0] */
#define tdm_dca_mode_adr 0x00008480
/* bitmask for bitfield dca_mode[3:0] */
#define tdm_dca_mode_msk 0x0000000f
/* inverted bitmask for bitfield dca_mode[3:0] */
#define tdm_dca_mode_mskn 0xfffffff0
/* lower bit position of bitfield dca_mode[3:0] */
#define tdm_dca_mode_shift 0
/* width of bitfield dca_mode[3:0] */
#define tdm_dca_mode_width 4
/* default value of bitfield dca_mode[3:0] */
#define tdm_dca_mode_default 0x0
/* tx dca{d}_desc_en bitfield definitions
* preprocessor definitions for the bitfield "dca{d}_desc_en".
* parameter: dca {d} | stride size 0x4 | range [0, 31]
* port="pif_tdm_dca_desc_en_i[0]"
*/
/* register address for bitfield dca{d}_desc_en */
#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_desc_en */
#define tdm_dcaddesc_en_msk 0x80000000
/* inverted bitmask for bitfield dca{d}_desc_en */
#define tdm_dcaddesc_en_mskn 0x7fffffff
/* lower bit position of bitfield dca{d}_desc_en */
#define tdm_dcaddesc_en_shift 31
/* width of bitfield dca{d}_desc_en */
#define tdm_dcaddesc_en_width 1
/* default value of bitfield dca{d}_desc_en */
#define tdm_dcaddesc_en_default 0x0
/* tx desc{d}_en bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_en".
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
* port="pif_tdm_desc_en_i[0]"
*/
/* register address for bitfield desc{d}_en */
#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_en */
#define tdm_descden_msk 0x80000000
/* inverted bitmask for bitfield desc{d}_en */
#define tdm_descden_mskn 0x7fffffff
/* lower bit position of bitfield desc{d}_en */
#define tdm_descden_shift 31
/* width of bitfield desc{d}_en */
#define tdm_descden_width 1
/* default value of bitfield desc{d}_en */
#define tdm_descden_default 0x0
/* tx desc{d}_hd[c:0] bitfield definitions
 * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
 * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
 * port="tdm_pif_desc0_hd_o[12:0]"
 * NOTE(review): this is the only group in this section with no
 * "_default" define; the port name ends in "_o" (vs. "_i" elsewhere),
 * which suggests a hardware-driven, read-only head pointer — confirm
 * against the datasheet.
 */
/* register address for bitfield desc{d}_hd[c:0] */
#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_hd[c:0] */
#define tdm_descdhd_msk 0x00001fff
/* inverted bitmask for bitfield desc{d}_hd[c:0] */
#define tdm_descdhd_mskn 0xffffe000
/* lower bit position of bitfield desc{d}_hd[c:0] */
#define tdm_descdhd_shift 0
/* width of bitfield desc{d}_hd[c:0] */
#define tdm_descdhd_width 13
/* tx desc{d}_len[9:0] bitfield definitions
* preprocessor definitions for the bitfield "desc{d}_len[9:0]".
* parameter: descriptor {d} | stride size 0x40 | range [0, 31]
* port="pif_tdm_desc0_len_i[9:0]"
*/
/* register address for bitfield desc{d}_len[9:0] */
#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_len[9:0] */
#define tdm_descdlen_msk 0x00001ff8
/* inverted bitmask for bitfield desc{d}_len[9:0] */
#define tdm_descdlen_mskn 0xffffe007
/* lower bit position of bitfield desc{d}_len[9:0] */
#define tdm_descdlen_shift 3
/* width of bitfield desc{d}_len[9:0] */
#define tdm_descdlen_width 10
/* default value of bitfield desc{d}_len[9:0] */
#define tdm_descdlen_default 0x0
/* tx int_desc_wrb_en bitfield definitions
* preprocessor definitions for the bitfield "int_desc_wrb_en".
* port="pif_tdm_int_desc_wrb_en_i"
*/
/* register address for bitfield int_desc_wrb_en */
#define tdm_int_desc_wrb_en_adr 0x00007b40
/* bitmask for bitfield int_desc_wrb_en */
#define tdm_int_desc_wrb_en_msk 0x00000002
/* inverted bitmask for bitfield int_desc_wrb_en */
#define tdm_int_desc_wrb_en_mskn 0xfffffffd
/* lower bit position of bitfield int_desc_wrb_en */
#define tdm_int_desc_wrb_en_shift 1
/* width of bitfield int_desc_wrb_en */
#define tdm_int_desc_wrb_en_width 1
/* default value of bitfield int_desc_wrb_en */
#define tdm_int_desc_wrb_en_default 0x0
/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
 * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
 * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
 * port="pif_tdm_desc0_wrb_thresh_i[6:0]"
 */
/* NOTE(review): this tdm_descdwrb_thresh_* set describes the same register
 * fields (same address, mask and shift) as the tdm_desc_dwrb_thresh_* set
 * defined further down in this file; the two differ only in macro naming
 * and hex-digit case — candidates for future consolidation.
 */
/* register address for bitfield desc{d}_wrb_thresh[6:0] */
#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
#define tdm_descdwrb_thresh_msk 0x00007f00
/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
#define tdm_descdwrb_thresh_mskn 0xffff80ff
/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
#define tdm_descdwrb_thresh_shift 8
/* width of bitfield desc{d}_wrb_thresh[6:0] */
#define tdm_descdwrb_thresh_width 7
/* default value of bitfield desc{d}_wrb_thresh[6:0] */
#define tdm_descdwrb_thresh_default 0x0
/* tx lso_tcp_flag_first[b:0] bitfield definitions
 * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
 * port="pif_thm_lso_tcp_flag_first_i[11:0]"
 */
/* register address for bitfield lso_tcp_flag_first[b:0] */
#define thm_lso_tcp_flag_first_adr 0x00007820
/* bitmask for bitfield lso_tcp_flag_first[b:0] */
#define thm_lso_tcp_flag_first_msk 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
#define thm_lso_tcp_flag_first_mskn 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
#define thm_lso_tcp_flag_first_shift 0
/* width of bitfield lso_tcp_flag_first[b:0] */
#define thm_lso_tcp_flag_first_width 12
/* default value of bitfield lso_tcp_flag_first[b:0] */
#define thm_lso_tcp_flag_first_default 0x0
/* tx lso_tcp_flag_last[b:0] bitfield definitions
 * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
 * port="pif_thm_lso_tcp_flag_last_i[11:0]"
 */
/* register address for bitfield lso_tcp_flag_last[b:0] */
#define thm_lso_tcp_flag_last_adr 0x00007824
/* bitmask for bitfield lso_tcp_flag_last[b:0] */
#define thm_lso_tcp_flag_last_msk 0x00000fff
/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
#define thm_lso_tcp_flag_last_mskn 0xfffff000
/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
#define thm_lso_tcp_flag_last_shift 0
/* width of bitfield lso_tcp_flag_last[b:0] */
#define thm_lso_tcp_flag_last_width 12
/* default value of bitfield lso_tcp_flag_last[b:0] */
#define thm_lso_tcp_flag_last_default 0x0
/* tx lso_tcp_flag_mid[b:0] bitfield definitions
 * preprocessor definitions for the bitfield "lso_tcp_flag_mid[b:0]".
 * port="pif_thm_lso_tcp_flag_mid_i[11:0]"
 * NOTE: the thm_lso_tcp_flag_mid_* macros themselves are defined further
 * down in this file; this banner is otherwise orphaned — the RX/LRO
 * definitions follow immediately below.
 */
/* RX lro_rsc_max[1F:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_rsc_max[1F:0]".
 * (banner added for consistency with the neighbouring sets; the PORT name
 * for this field is not visible in this part of the file)
 */
/* Register address for bitfield lro_rsc_max[1F:0] */
#define rpo_lro_rsc_max_adr 0x00005598
/* Bitmask for bitfield lro_rsc_max[1F:0] */
#define rpo_lro_rsc_max_msk 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
#define rpo_lro_rsc_max_mskn 0x00000000
/* Lower bit position of bitfield lro_rsc_max[1F:0] */
#define rpo_lro_rsc_max_shift 0
/* Width of bitfield lro_rsc_max[1F:0] */
#define rpo_lro_rsc_max_width 32
/* Default value of bitfield lro_rsc_max[1F:0] */
#define rpo_lro_rsc_max_default 0x0
/* RX lro_en[1F:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_en[1F:0]".
 * PORT="pif_rpo_lro_en_i[31:0]"
 */
/* Register address for bitfield lro_en[1F:0] */
#define rpo_lro_en_adr 0x00005590
/* Bitmask for bitfield lro_en[1F:0] (one enable bit per queue) */
#define rpo_lro_en_msk 0xFFFFFFFF
/* Inverted bitmask for bitfield lro_en[1F:0] */
#define rpo_lro_en_mskn 0x00000000
/* Lower bit position of bitfield lro_en[1F:0] */
#define rpo_lro_en_shift 0
/* Width of bitfield lro_en[1F:0] */
#define rpo_lro_en_width 32
/* Default value of bitfield lro_en[1F:0] */
#define rpo_lro_en_default 0x0
/* RX lro_ptopt_en Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_ptopt_en".
 * PORT="pif_rpo_lro_ptopt_en_i"
 */
/* Register address for bitfield lro_ptopt_en */
#define rpo_lro_ptopt_en_adr 0x00005594
/* Bitmask for bitfield lro_ptopt_en */
#define rpo_lro_ptopt_en_msk 0x00008000
/* Inverted bitmask for bitfield lro_ptopt_en */
#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
/* Lower bit position of bitfield lro_ptopt_en */
#define rpo_lro_ptopt_en_shift 15
/* Width of bitfield lro_ptopt_en */
#define rpo_lro_ptopt_en_width 1
/* Default value of bitfield lro_ptopt_en.
 * The misspelled "_defalt" name is kept for source compatibility with
 * existing users; new code should use the correctly spelled alias below.
 */
#define rpo_lro_ptopt_en_defalt 0x1
#define rpo_lro_ptopt_en_default rpo_lro_ptopt_en_defalt
/* RX lro_q_ses_lmt Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_q_ses_lmt".
 * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]"
 * (macro names contract "q_ses" to "qses")
 */
/* Register address for bitfield lro_q_ses_lmt */
#define rpo_lro_qses_lmt_adr 0x00005594
/* Bitmask for bitfield lro_q_ses_lmt */
#define rpo_lro_qses_lmt_msk 0x00003000
/* Inverted bitmask for bitfield lro_q_ses_lmt */
#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
/* Lower bit position of bitfield lro_q_ses_lmt */
#define rpo_lro_qses_lmt_shift 12
/* Width of bitfield lro_q_ses_lmt */
#define rpo_lro_qses_lmt_width 2
/* Default value of bitfield lro_q_ses_lmt */
#define rpo_lro_qses_lmt_default 0x1
/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
 * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]"
 */
/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
#define rpo_lro_tot_dsc_lmt_adr 0x00005594
/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
#define rpo_lro_tot_dsc_lmt_msk 0x00000060
/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
#define rpo_lro_tot_dsc_lmt_shift 5
/* Width of bitfield lro_tot_dsc_lmt[1:0] */
#define rpo_lro_tot_dsc_lmt_width 2
/* Default value of bitfield lro_tot_dsc_lmt[1:0].
 * The misspelled "_defalt" name is kept for source compatibility with
 * existing users; new code should use the correctly spelled alias below.
 */
#define rpo_lro_tot_dsc_lmt_defalt 0x1
#define rpo_lro_tot_dsc_lmt_default rpo_lro_tot_dsc_lmt_defalt
/* RX lro_pkt_min[4:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
 * PORT="pif_rpo_lro_pkt_min_i[4:0]"
 */
/* Register address for bitfield lro_pkt_min[4:0] */
#define rpo_lro_pkt_min_adr 0x00005594
/* Bitmask for bitfield lro_pkt_min[4:0] */
#define rpo_lro_pkt_min_msk 0x0000001F
/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
/* Lower bit position of bitfield lro_pkt_min[4:0] */
#define rpo_lro_pkt_min_shift 0
/* Width of bitfield lro_pkt_min[4:0] */
#define rpo_lro_pkt_min_width 5
/* Default value of bitfield lro_pkt_min[4:0] */
#define rpo_lro_pkt_min_default 0x8
/* NOTE(review): partial lro{L}_des_max[1:0] set — only the width and
 * default macros appear here; confirm the adr/msk/mskn/shift macros for
 * this field are defined elsewhere before relying on this set.
 */
/* Width of bitfield lro{L}_des_max[1:0] */
#define rpo_lro_ldes_max_width 2
/* Default value of bitfield lro{L}_des_max[1:0] */
#define rpo_lro_ldes_max_default 0x0
/* RX lro_tb_div[11:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
 * PORT="pif_rpo_lro_tb_div_i[11:0]"
 */
/* Register address for bitfield lro_tb_div[11:0] */
#define rpo_lro_tb_div_adr 0x00005620
/* Bitmask for bitfield lro_tb_div[11:0] */
#define rpo_lro_tb_div_msk 0xFFF00000
/* Inverted bitmask for bitfield lro_tb_div[11:0] */
#define rpo_lro_tb_div_mskn 0x000FFFFF
/* Lower bit position of bitfield lro_tb_div[11:0] */
#define rpo_lro_tb_div_shift 20
/* Width of bitfield lro_tb_div[11:0] */
#define rpo_lro_tb_div_width 12
/* Default value of bitfield lro_tb_div[11:0] (0xC35 = 3125 decimal) */
#define rpo_lro_tb_div_default 0xC35
/* RX lro_ina_ival[9:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
 * PORT="pif_rpo_lro_ina_ival_i[9:0]"
 */
/* Register address for bitfield lro_ina_ival[9:0] */
#define rpo_lro_ina_ival_adr 0x00005620
/* Bitmask for bitfield lro_ina_ival[9:0] */
#define rpo_lro_ina_ival_msk 0x000FFC00
/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
#define rpo_lro_ina_ival_mskn 0xFFF003FF
/* Lower bit position of bitfield lro_ina_ival[9:0] */
#define rpo_lro_ina_ival_shift 10
/* Width of bitfield lro_ina_ival[9:0] */
#define rpo_lro_ina_ival_width 10
/* Default value of bitfield lro_ina_ival[9:0] */
#define rpo_lro_ina_ival_default 0xA
/* RX lro_max_ival[9:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
 * PORT="pif_rpo_lro_max_ival_i[9:0]"
 */
/* Register address for bitfield lro_max_ival[9:0] */
#define rpo_lro_max_ival_adr 0x00005620
/* Bitmask for bitfield lro_max_ival[9:0] */
#define rpo_lro_max_ival_msk 0x000003FF
/* Inverted bitmask for bitfield lro_max_ival[9:0] */
#define rpo_lro_max_ival_mskn 0xFFFFFC00
/* Lower bit position of bitfield lro_max_ival[9:0] */
#define rpo_lro_max_ival_shift 0
/* Width of bitfield lro_max_ival[9:0] */
#define rpo_lro_max_ival_width 10
/* Default value of bitfield lro_max_ival[9:0] */
#define rpo_lro_max_ival_default 0x19
/* TX dca{D}_cpuid[7:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
 * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
 * PORT="pif_tdm_dca0_cpuid_i[7:0]"
 */
/* Register address for bitfield dca{D}_cpuid[7:0] */
#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_cpuid[7:0] */
#define tdm_dca_dcpuid_msk 0x000000FF
/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
#define tdm_dca_dcpuid_mskn 0xFFFFFF00
/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
#define tdm_dca_dcpuid_shift 0
/* Width of bitfield dca{D}_cpuid[7:0] */
#define tdm_dca_dcpuid_width 8
/* Default value of bitfield dca{D}_cpuid[7:0] */
#define tdm_dca_dcpuid_default 0x0
/* TX dca{D}_desc_en Bitfield Definitions
 * Preprocessor definitions for the bitfield "dca{D}_desc_en".
 * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
 * PORT="pif_tdm_dca_desc_en_i[0]"
 */
/* Register address for bitfield dca{D}_desc_en (shares the register with
 * dca{D}_cpuid above)
 */
#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
/* Bitmask for bitfield dca{D}_desc_en */
#define tdm_dca_ddesc_en_msk 0x80000000
/* Inverted bitmask for bitfield dca{D}_desc_en */
#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
/* Lower bit position of bitfield dca{D}_desc_en */
#define tdm_dca_ddesc_en_shift 31
/* Width of bitfield dca{D}_desc_en */
#define tdm_dca_ddesc_en_width 1
/* Default value of bitfield dca{D}_desc_en */
#define tdm_dca_ddesc_en_default 0x0
/* TX desc{D}_en Bitfield Definitions
 * Preprocessor definitions for the bitfield "desc{D}_en".
 * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
 * PORT="pif_tdm_desc_en_i[0]"
 */
/* Register address for bitfield desc{D}_en */
#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_en */
#define tdm_desc_den_msk 0x80000000
/* Inverted bitmask for bitfield desc{D}_en */
#define tdm_desc_den_mskn 0x7FFFFFFF
/* Lower bit position of bitfield desc{D}_en */
#define tdm_desc_den_shift 31
/* Width of bitfield desc{D}_en */
#define tdm_desc_den_width 1
/* Default value of bitfield desc{D}_en */
#define tdm_desc_den_default 0x0
/* TX desc{D}_hd[C:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
 * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
 * PORT="tdm_pif_desc0_hd_o[12:0]"
 */
/* Register address for bitfield desc{D}_hd[C:0] */
#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_hd[C:0] */
#define tdm_desc_dhd_msk 0x00001FFF
/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
#define tdm_desc_dhd_mskn 0xFFFFE000
/* Lower bit position of bitfield desc{D}_hd[C:0] */
#define tdm_desc_dhd_shift 0
/* Width of bitfield desc{D}_hd[C:0] */
#define tdm_desc_dhd_width 13
/* No _default macro for desc{D}_hd: its PORT name ends in "_o", i.e. it
 * appears to be a hardware-reported (read-only) head pointer rather than a
 * programmable field — TODO confirm against the register spec.
 */
/* TX desc{D}_len[9:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
 * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
 * PORT="pif_tdm_desc0_len_i[9:0]"
 */
/* Register address for bitfield desc{D}_len[9:0] */
#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_len[9:0] */
#define tdm_desc_dlen_msk 0x00001FF8
/* Inverted bitmask for bitfield desc{D}_len[9:0] */
#define tdm_desc_dlen_mskn 0xFFFFE007
/* Lower bit position of bitfield desc{D}_len[9:0] */
#define tdm_desc_dlen_shift 3
/* Width of bitfield desc{D}_len[9:0] */
#define tdm_desc_dlen_width 10
/* Default value of bitfield desc{D}_len[9:0] */
#define tdm_desc_dlen_default 0x0
/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
 * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
 * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]"
 * NOTE(review): duplicates the earlier tdm_descdwrb_thresh_* set (same
 * address, mask and shift; different naming and hex-digit case).
 */
/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
#define tdm_desc_dwrb_thresh_adr(descriptor) \
	(0x00007C18 + (descriptor) * 0x40)
/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
#define tdm_desc_dwrb_thresh_msk 0x00007F00
/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
#define tdm_desc_dwrb_thresh_shift 8
/* Width of bitfield desc{D}_wrb_thresh[6:0] */
#define tdm_desc_dwrb_thresh_width 7
/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
#define tdm_desc_dwrb_thresh_default 0x0
/* TX tdm_int_mod_en Bitfield Definitions
 * Preprocessor definitions for the bitfield "tdm_int_mod_en".
 * PORT="pif_tdm_int_mod_en_i"
 */
/* Register address for bitfield tdm_int_mod_en */
#define tdm_int_mod_en_adr 0x00007B40
/* Bitmask for bitfield tdm_int_mod_en */
#define tdm_int_mod_en_msk 0x00000010
/* Inverted bitmask for bitfield tdm_int_mod_en */
#define tdm_int_mod_en_mskn 0xFFFFFFEF
/* Lower bit position of bitfield tdm_int_mod_en */
#define tdm_int_mod_en_shift 4
/* Width of bitfield tdm_int_mod_en */
#define tdm_int_mod_en_width 1
/* Default value of bitfield tdm_int_mod_en */
#define tdm_int_mod_en_default 0x0
/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
 * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
 * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
 * (same register as thm_lso_tcp_flag_first above, upper half-word)
 */
/* register address for bitfield lso_tcp_flag_mid[b:0] */
#define thm_lso_tcp_flag_mid_adr 0x00007820
/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
#define thm_lso_tcp_flag_mid_msk 0x0fff0000
/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
#define thm_lso_tcp_flag_mid_shift 16
/* width of bitfield lso_tcp_flag_mid[b:0] */
#define thm_lso_tcp_flag_mid_width 12
/* default value of bitfield lso_tcp_flag_mid[b:0] */
#define thm_lso_tcp_flag_mid_default 0x0
/* tx tx_buf_en bitfield definitions
 * preprocessor definitions for the bitfield "tx_buf_en".
 * port="pif_tpb_tx_buf_en_i"
 */
/* register address for bitfield tx_buf_en */
#define tpb_tx_buf_en_adr 0x00007900
/* bitmask for bitfield tx_buf_en */
#define tpb_tx_buf_en_msk 0x00000001
/* inverted bitmask for bitfield tx_buf_en */
#define tpb_tx_buf_en_mskn 0xfffffffe
/* lower bit position of bitfield tx_buf_en */
#define tpb_tx_buf_en_shift 0
/* width of bitfield tx_buf_en */
#define tpb_tx_buf_en_width 1
/* default value of bitfield tx_buf_en */
#define tpb_tx_buf_en_default 0x0
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
 * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
 * parameter: buffer {b} | stride size 0x10 | range [0, 7]
 * port="pif_tpb_tx0_hi_thresh_i[12:0]"
 */
/* register address for bitfield tx{b}_hi_thresh[c:0] */
#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
#define tpb_txbhi_thresh_msk 0x1fff0000
/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
#define tpb_txbhi_thresh_mskn 0xe000ffff
/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
#define tpb_txbhi_thresh_shift 16
/* width of bitfield tx{b}_hi_thresh[c:0] */
#define tpb_txbhi_thresh_width 13
/* default value of bitfield tx{b}_hi_thresh[c:0] */
#define tpb_txbhi_thresh_default 0x0
/* tx tx{b}_lo_thresh[c:0] bitfield definitions
 * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
 * parameter: buffer {b} | stride size 0x10 | range [0, 7]
 * port="pif_tpb_tx0_lo_thresh_i[12:0]"
 * (same register as tx{b}_hi_thresh, lower half-word)
 */
/* register address for bitfield tx{b}_lo_thresh[c:0] */
#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
#define tpb_txblo_thresh_msk 0x00001fff
/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
#define tpb_txblo_thresh_mskn 0xffffe000
/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
#define tpb_txblo_thresh_shift 0
/* width of bitfield tx{b}_lo_thresh[c:0] */
#define tpb_txblo_thresh_width 13
/* default value of bitfield tx{b}_lo_thresh[c:0] */
#define tpb_txblo_thresh_default 0x0
/* tx dma_sys_loopback bitfield definitions
 * preprocessor definitions for the bitfield "dma_sys_loopback".
 * port="pif_tpb_dma_sys_lbk_i"
 */
/* register address for bitfield dma_sys_loopback */
#define tpb_dma_sys_lbk_adr 0x00007000
/* bitmask for bitfield dma_sys_loopback */
#define tpb_dma_sys_lbk_msk 0x00000040
/* inverted bitmask for bitfield dma_sys_loopback */
#define tpb_dma_sys_lbk_mskn 0xffffffbf
/* lower bit position of bitfield dma_sys_loopback */
#define tpb_dma_sys_lbk_shift 6
/* width of bitfield dma_sys_loopback */
#define tpb_dma_sys_lbk_width 1
/* default value of bitfield dma_sys_loopback */
#define tpb_dma_sys_lbk_default 0x0
/* tx tx{b}_buf_size[7:0] bitfield definitions
 * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
 * parameter: buffer {b} | stride size 0x10 | range [0, 7]
 * port="pif_tpb_tx0_buf_size_i[7:0]"
 */
/* register address for bitfield tx{b}_buf_size[7:0] */
#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
/* bitmask for bitfield tx{b}_buf_size[7:0] */
#define tpb_txbbuf_size_msk 0x000000ff
/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
#define tpb_txbbuf_size_mskn 0xffffff00
/* lower bit position of bitfield tx{b}_buf_size[7:0] */
#define tpb_txbbuf_size_shift 0
/* width of bitfield tx{b}_buf_size[7:0] */
#define tpb_txbbuf_size_width 8
/* default value of bitfield tx{b}_buf_size[7:0] */
#define tpb_txbbuf_size_default 0x0
/* tx tx_scp_ins_en bitfield definitions
 * preprocessor definitions for the bitfield "tx_scp_ins_en".
 * port="pif_tpb_scp_ins_en_i"
 */
/* register address for bitfield tx_scp_ins_en */
#define tpb_tx_scp_ins_en_adr 0x00007900
/* bitmask for bitfield tx_scp_ins_en */
#define tpb_tx_scp_ins_en_msk 0x00000004
/* inverted bitmask for bitfield tx_scp_ins_en */
#define tpb_tx_scp_ins_en_mskn 0xfffffffb
/* lower bit position of bitfield tx_scp_ins_en */
#define tpb_tx_scp_ins_en_shift 2
/* width of bitfield tx_scp_ins_en */
#define tpb_tx_scp_ins_en_width 1
/* default value of bitfield tx_scp_ins_en */
#define tpb_tx_scp_ins_en_default 0x0
/* tx ipv4_chk_en bitfield definitions
 * preprocessor definitions for the bitfield "ipv4_chk_en".
 * port="pif_tpo_ipv4_chk_en_i"
 */
/* register address for bitfield ipv4_chk_en */
#define tpo_ipv4chk_en_adr 0x00007800
/* bitmask for bitfield ipv4_chk_en */
#define tpo_ipv4chk_en_msk 0x00000002
/* inverted bitmask for bitfield ipv4_chk_en */
#define tpo_ipv4chk_en_mskn 0xfffffffd
/* lower bit position of bitfield ipv4_chk_en */
#define tpo_ipv4chk_en_shift 1
/* width of bitfield ipv4_chk_en */
#define tpo_ipv4chk_en_width 1
/* default value of bitfield ipv4_chk_en */
#define tpo_ipv4chk_en_default 0x0
/* tx l4_chk_en bitfield definitions
 * preprocessor definitions for the bitfield "l4_chk_en".
 * port="pif_tpo_l4_chk_en_i"
 * (macro prefix "tpol4chk" lacks the underscore used by the sibling
 * tpo_ipv4chk_en_* set; kept as-is since renaming would break users)
 */
/* register address for bitfield l4_chk_en */
#define tpol4chk_en_adr 0x00007800
/* bitmask for bitfield l4_chk_en */
#define tpol4chk_en_msk 0x00000001
/* inverted bitmask for bitfield l4_chk_en */
#define tpol4chk_en_mskn 0xfffffffe
/* lower bit position of bitfield l4_chk_en */
#define tpol4chk_en_shift 0
/* width of bitfield l4_chk_en */
#define tpol4chk_en_width 1
/* default value of bitfield l4_chk_en */
#define tpol4chk_en_default 0x0
/* tx pkt_sys_loopback bitfield definitions
 * preprocessor definitions for the bitfield "pkt_sys_loopback".
 * port="pif_tpo_pkt_sys_lbk_i"
 */
/* register address for bitfield pkt_sys_loopback */
#define tpo_pkt_sys_lbk_adr 0x00007000
/* bitmask for bitfield pkt_sys_loopback */
#define tpo_pkt_sys_lbk_msk 0x00000080
/* inverted bitmask for bitfield pkt_sys_loopback */
#define tpo_pkt_sys_lbk_mskn 0xffffff7f
/* lower bit position of bitfield pkt_sys_loopback */
#define tpo_pkt_sys_lbk_shift 7
/* width of bitfield pkt_sys_loopback */
#define tpo_pkt_sys_lbk_width 1
/* default value of bitfield pkt_sys_loopback */
#define tpo_pkt_sys_lbk_default 0x0
/* tx data_tc_arb_mode bitfield definitions
 * preprocessor definitions for the bitfield "data_tc_arb_mode".
 * port="pif_tps_data_tc_arb_mode_i"
 */
/* register address for bitfield data_tc_arb_mode */
#define tps_data_tc_arb_mode_adr 0x00007100
/* bitmask for bitfield data_tc_arb_mode */
#define tps_data_tc_arb_mode_msk 0x00000001
/* inverted bitmask for bitfield data_tc_arb_mode */
#define tps_data_tc_arb_mode_mskn 0xfffffffe
/* lower bit position of bitfield data_tc_arb_mode */
#define tps_data_tc_arb_mode_shift 0
/* width of bitfield data_tc_arb_mode */
#define tps_data_tc_arb_mode_width 1
/* default value of bitfield data_tc_arb_mode */
#define tps_data_tc_arb_mode_default 0x0
/* tx desc_rate_ta_rst bitfield definitions
 * preprocessor definitions for the bitfield "desc_rate_ta_rst".
 * port="pif_tps_desc_rate_ta_rst_i"
 */
/* register address for bitfield desc_rate_ta_rst */
#define tps_desc_rate_ta_rst_adr 0x00007310
/* bitmask for bitfield desc_rate_ta_rst */
#define tps_desc_rate_ta_rst_msk 0x80000000
/* inverted bitmask for bitfield desc_rate_ta_rst */
#define tps_desc_rate_ta_rst_mskn 0x7fffffff
/* lower bit position of bitfield desc_rate_ta_rst */
#define tps_desc_rate_ta_rst_shift 31
/* width of bitfield desc_rate_ta_rst */
#define tps_desc_rate_ta_rst_width 1
/* default value of bitfield desc_rate_ta_rst */
#define tps_desc_rate_ta_rst_default 0x0
/* tx desc_rate_limit[a:0] bitfield definitions
 * preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
 * port="pif_tps_desc_rate_lim_i[10:0]"
 * (same register as desc_rate_ta_rst above)
 */
/* register address for bitfield desc_rate_limit[a:0] */
#define tps_desc_rate_lim_adr 0x00007310
/* bitmask for bitfield desc_rate_limit[a:0] */
#define tps_desc_rate_lim_msk 0x000007ff
/* inverted bitmask for bitfield desc_rate_limit[a:0] */
#define tps_desc_rate_lim_mskn 0xfffff800
/* lower bit position of bitfield desc_rate_limit[a:0] */
#define tps_desc_rate_lim_shift 0
/* width of bitfield desc_rate_limit[a:0] */
#define tps_desc_rate_lim_width 11
/* default value of bitfield desc_rate_limit[a:0] */
#define tps_desc_rate_lim_default 0x0
/* tx desc_tc_arb_mode[1:0] bitfield definitions
 * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
 * port="pif_tps_desc_tc_arb_mode_i[1:0]"
 */
/* register address for bitfield desc_tc_arb_mode[1:0] */
#define tps_desc_tc_arb_mode_adr 0x00007200
/* bitmask for bitfield desc_tc_arb_mode[1:0] */
#define tps_desc_tc_arb_mode_msk 0x00000003
/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
#define tps_desc_tc_arb_mode_mskn 0xfffffffc
/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
#define tps_desc_tc_arb_mode_shift 0
/* width of bitfield desc_tc_arb_mode[1:0] */
#define tps_desc_tc_arb_mode_width 2
/* default value of bitfield desc_tc_arb_mode[1:0] */
#define tps_desc_tc_arb_mode_default 0x0
/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
 * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
 * parameter: tc {t} | stride size 0x4 | range [0, 7]
 * port="pif_tps_desc_tc0_credit_max_i[11:0]"
 */
/* register address for bitfield desc_tc{t}_credit_max[b:0] */
#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
#define tps_desc_tctcredit_max_msk 0x0fff0000
/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
#define tps_desc_tctcredit_max_mskn 0xf000ffff
/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
#define tps_desc_tctcredit_max_shift 16
/* width of bitfield desc_tc{t}_credit_max[b:0] */
#define tps_desc_tctcredit_max_width 12
/* default value of bitfield desc_tc{t}_credit_max[b:0] */
#define tps_desc_tctcredit_max_default 0x0
/* tx desc_tc{t}_weight[8:0] bitfield definitions
 * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
 * parameter: tc {t} | stride size 0x4 | range [0, 7]
 * port="pif_tps_desc_tc0_weight_i[8:0]"
 * (same per-TC register as desc_tc{t}_credit_max, low bits)
 */
/* register address for bitfield desc_tc{t}_weight[8:0] */
#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
/* bitmask for bitfield desc_tc{t}_weight[8:0] */
#define tps_desc_tctweight_msk 0x000001ff
/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
#define tps_desc_tctweight_mskn 0xfffffe00
/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
#define tps_desc_tctweight_shift 0
/* width of bitfield desc_tc{t}_weight[8:0] */
#define tps_desc_tctweight_width 9
/* default value of bitfield desc_tc{t}_weight[8:0] */
#define tps_desc_tctweight_default 0x0
/* tx desc_vm_arb_mode bitfield definitions
 * preprocessor definitions for the bitfield "desc_vm_arb_mode".
 * port="pif_tps_desc_vm_arb_mode_i"
 */
/* register address for bitfield desc_vm_arb_mode */
#define tps_desc_vm_arb_mode_adr 0x00007300
/* bitmask for bitfield desc_vm_arb_mode */
#define tps_desc_vm_arb_mode_msk 0x00000001
/* inverted bitmask for bitfield desc_vm_arb_mode */
#define tps_desc_vm_arb_mode_mskn 0xfffffffe
/* lower bit position of bitfield desc_vm_arb_mode */
#define tps_desc_vm_arb_mode_shift 0
/* width of bitfield desc_vm_arb_mode */
#define tps_desc_vm_arb_mode_width 1
/* default value of bitfield desc_vm_arb_mode */
#define tps_desc_vm_arb_mode_default 0x0
/* tx data_tc{t}_credit_max[b:0] bitfield definitions
 * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
 * parameter: tc {t} | stride size 0x4 | range [0, 7]
 * port="pif_tps_data_tc0_credit_max_i[11:0]"
 */
/* register address for bitfield data_tc{t}_credit_max[b:0] */
#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
#define tps_data_tctcredit_max_msk 0x0fff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
#define tps_data_tctcredit_max_mskn 0xf000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
#define tps_data_tctcredit_max_shift 16
/* width of bitfield data_tc{t}_credit_max[b:0] */
#define tps_data_tctcredit_max_width 12
/* default value of bitfield data_tc{t}_credit_max[b:0] */
#define tps_data_tctcredit_max_default 0x0
/* tx data_tc{t}_weight[8:0] bitfield definitions
 * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
 * parameter: tc {t} | stride size 0x4 | range [0, 7]
 * port="pif_tps_data_tc0_weight_i[8:0]"
 * (same per-TC register as data_tc{t}_credit_max, low bits)
 */
/* register address for bitfield data_tc{t}_weight[8:0] */
#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_weight[8:0] */
#define tps_data_tctweight_msk 0x000001ff
/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
#define tps_data_tctweight_mskn 0xfffffe00
/* lower bit position of bitfield data_tc{t}_weight[8:0] */
#define tps_data_tctweight_shift 0
/* width of bitfield data_tc{t}_weight[8:0] */
#define tps_data_tctweight_width 9
/* default value of bitfield data_tc{t}_weight[8:0] */
#define tps_data_tctweight_default 0x0
/* tx reg_res_dsbl bitfield definitions
 * preprocessor definitions for the bitfield "reg_res_dsbl".
 * port="pif_tx_reg_res_dsbl_i"
 */
/* register address for bitfield reg_res_dsbl */
#define tx_reg_res_dsbl_adr 0x00007000
/* bitmask for bitfield reg_res_dsbl */
#define tx_reg_res_dsbl_msk 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
#define tx_reg_res_dsbl_mskn 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
#define tx_reg_res_dsbl_shift 29
/* width of bitfield reg_res_dsbl */
#define tx_reg_res_dsbl_width 1
/* default value of bitfield reg_res_dsbl */
#define tx_reg_res_dsbl_default 0x1
/* mac_phy register access busy bitfield definitions
 * preprocessor definitions for the bitfield "register access busy".
 * port="msm_pif_reg_busy_o"
 */
/* register address for bitfield register access busy */
#define msm_reg_access_busy_adr 0x00004400
/* bitmask for bitfield register access busy */
#define msm_reg_access_busy_msk 0x00001000
/* inverted bitmask for bitfield register access busy */
#define msm_reg_access_busy_mskn 0xffffefff
/* lower bit position of bitfield register access busy */
#define msm_reg_access_busy_shift 12
/* width of bitfield register access busy */
#define msm_reg_access_busy_width 1
/* no _default macro: PORT name ends in "_o", i.e. this appears to be a
 * hardware-driven status bit — TODO confirm
 */
/* mac_phy msm register address[7:0] bitfield definitions
 * preprocessor definitions for the bitfield "msm register address[7:0]".
 * port="pif_msm_reg_addr_i[7:0]"
 */
/* register address for bitfield msm register address[7:0] */
#define msm_reg_addr_adr 0x00004400
/* bitmask for bitfield msm register address[7:0] */
#define msm_reg_addr_msk 0x000000ff
/* inverted bitmask for bitfield msm register address[7:0] */
#define msm_reg_addr_mskn 0xffffff00
/* lower bit position of bitfield msm register address[7:0] */
#define msm_reg_addr_shift 0
/* width of bitfield msm register address[7:0] */
#define msm_reg_addr_width 8
/* default value of bitfield msm register address[7:0] */
#define msm_reg_addr_default 0x0
/* mac_phy register read strobe bitfield definitions
 * preprocessor definitions for the bitfield "register read strobe".
 * port="pif_msm_reg_rden_i"
 */
/* register address for bitfield register read strobe */
#define msm_reg_rd_strobe_adr 0x00004400
/* bitmask for bitfield register read strobe */
#define msm_reg_rd_strobe_msk 0x00000200
/* inverted bitmask for bitfield register read strobe */
#define msm_reg_rd_strobe_mskn 0xfffffdff
/* lower bit position of bitfield register read strobe */
#define msm_reg_rd_strobe_shift 9
/* width of bitfield register read strobe */
#define msm_reg_rd_strobe_width 1
/* default value of bitfield register read strobe */
#define msm_reg_rd_strobe_default 0x0
/* mac_phy msm register read data[31:0] bitfield definitions
 * preprocessor definitions for the bitfield "msm register read data[31:0]".
 * port="msm_pif_reg_rd_data_o[31:0]"
 */
/* register address for bitfield msm register read data[31:0] */
#define msm_reg_rd_data_adr 0x00004408
/* bitmask for bitfield msm register read data[31:0] */
#define msm_reg_rd_data_msk 0xffffffff
/* inverted bitmask for bitfield msm register read data[31:0] */
#define msm_reg_rd_data_mskn 0x00000000
/* lower bit position of bitfield msm register read data[31:0] */
#define msm_reg_rd_data_shift 0
/* width of bitfield msm register read data[31:0] */
#define msm_reg_rd_data_width 32
/* no _default macro: PORT name ends in "_o", i.e. this appears to be
 * hardware-returned read data — TODO confirm
 */
/* mac_phy msm register write data[31:0] bitfield definitions
 * preprocessor definitions for the bitfield "msm register write data[31:0]".
 * port="pif_msm_reg_wr_data_i[31:0]"
 */
/* register address for bitfield msm register write data[31:0] */
#define msm_reg_wr_data_adr 0x00004404
/* bitmask for bitfield msm register write data[31:0] */
#define msm_reg_wr_data_msk 0xffffffff
/* inverted bitmask for bitfield msm register write data[31:0] */
#define msm_reg_wr_data_mskn 0x00000000
/* lower bit position of bitfield msm register write data[31:0] */
#define msm_reg_wr_data_shift 0
/* width of bitfield msm register write data[31:0] */
#define msm_reg_wr_data_width 32
/* default value of bitfield msm register write data[31:0] */
#define msm_reg_wr_data_default 0x0
/* mac_phy register write strobe bitfield definitions
 * preprocessor definitions for the bitfield "register write strobe".
 * port="pif_msm_reg_wren_i"
 */
/* register address for bitfield register write strobe */
#define msm_reg_wr_strobe_adr 0x00004400
/* bitmask for bitfield register write strobe */
#define msm_reg_wr_strobe_msk 0x00000100
/* inverted bitmask for bitfield register write strobe */
#define msm_reg_wr_strobe_mskn 0xfffffeff
/* lower bit position of bitfield register write strobe */
#define msm_reg_wr_strobe_shift 8
/* width of bitfield register write strobe */
#define msm_reg_wr_strobe_width 1
/* default value of bitfield register write strobe */
#define msm_reg_wr_strobe_default 0x0
/* mif soft reset bitfield definitions
 * preprocessor definitions for the bitfield "soft reset".
 * port="pif_glb_res_i"
 */
/* register address for bitfield soft reset */
#define glb_soft_res_adr 0x00000000
/* bitmask for bitfield soft reset */
#define glb_soft_res_msk 0x00008000
/* inverted bitmask for bitfield soft reset */
#define glb_soft_res_mskn 0xffff7fff
/* lower bit position of bitfield soft reset */
#define glb_soft_res_shift 15
/* width of bitfield soft reset */
#define glb_soft_res_width 1
/* default value of bitfield soft reset */
#define glb_soft_res_default 0x0
/* mif register reset disable bitfield definitions
 * preprocessor definitions for the bitfield "register reset disable".
 * port="pif_glb_reg_res_dsbl_i"
 * (same register as soft reset above)
 */
/* register address for bitfield register reset disable */
#define glb_reg_res_dis_adr 0x00000000
/* bitmask for bitfield register reset disable */
#define glb_reg_res_dis_msk 0x00004000
/* inverted bitmask for bitfield register reset disable */
#define glb_reg_res_dis_mskn 0xffffbfff
/* lower bit position of bitfield register reset disable */
#define glb_reg_res_dis_shift 14
/* width of bitfield register reset disable */
#define glb_reg_res_dis_width 1
/* default value of bitfield register reset disable */
#define glb_reg_res_dis_default 0x1
/* tx dma debug control definitions */
#define tx_dma_debug_ctl_adr 0x00008920u
/* tx dma descriptor base address msw definitions
 * (per-descriptor register; stride 0x40, matching the tdm_desc_* sets)
 */
#define tx_dma_desc_base_addrmsw_adr(descriptor) \
	(0x00007c04u + (descriptor) * 0x40)
/* tx interrupt moderation control register definitions
 * Preprocessor definitions for TX Interrupt Moderation Control Register
 * Base Address: 0x00008980
 * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
 */
#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
/* pcie reg_res_dsbl bitfield definitions
 * preprocessor definitions for the bitfield "reg_res_dsbl".
 * port="pif_pci_reg_res_dsbl_i"
 */
/* register address for bitfield reg_res_dsbl */
#define pci_reg_res_dsbl_adr 0x00001000
/* bitmask for bitfield reg_res_dsbl */
#define pci_reg_res_dsbl_msk 0x20000000
/* inverted bitmask for bitfield reg_res_dsbl */
#define pci_reg_res_dsbl_mskn 0xdfffffff
/* lower bit position of bitfield reg_res_dsbl */
#define pci_reg_res_dsbl_shift 29
/* width of bitfield reg_res_dsbl */
#define pci_reg_res_dsbl_width 1
/* default value of bitfield reg_res_dsbl */
#define pci_reg_res_dsbl_default 0x1
/* global microprocessor scratch pad definitions
 * (scratch_scp selects one of the 32-bit scratch registers; stride 0x4)
 */
#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
#endif /* HW_ATL_LLH_INTERNAL_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
* abstraction layer.
*/
#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_pci_func.h"
#include "../aq_ring.h"
#include "../aq_vec.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include <linux/random.h>
#define HW_ATL_UCP_0X370_REG 0x0370U
#define HW_ATL_FW_SM_RAM 0x2U
#define HW_ATL_MPI_CONTROL_ADR 0x0368U
#define HW_ATL_MPI_STATE_ADR 0x036CU
#define HW_ATL_MPI_STATE_MSK 0x00FFU
#define HW_ATL_MPI_STATE_SHIFT 0U
#define HW_ATL_MPI_SPEED_MSK 0xFFFFU
#define HW_ATL_MPI_SPEED_SHIFT 16U
/* Read @cnt dwords of firmware shared memory at FW address @a into @p.
 *
 * Serializes access with the FW via the HW_ATL_FW_SM_RAM semaphore.
 * If the initial wait for the semaphore times out, the semaphore is
 * released and re-read once as a recovery attempt before failing.
 *
 * Register usage (mailbox window — confirm against HW spec):
 *   0x208 = address, 0x200 = command/busy, 0x20C = data out.
 *
 * Returns 0 on success, -ETIME if the semaphore cannot be obtained.
 */
static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
					 u32 *p, u32 cnt)
{
	int err = 0;

	/* NOTE: AQ_HW_WAIT_FOR() sets the local "err" (-ETIME on timeout) */
	AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
					   HW_ATL_FW_SM_RAM) == 1U,
		       1U, 10000U);

	if (err < 0) {
		bool is_locked;

		/* release a possibly stale semaphore and try once more */
		reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
		is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
		if (!is_locked) {
			err = -ETIME;
			goto err_exit;
		}
	}

	/* latch the FW-side start address for the window */
	aq_hw_write_reg(self, 0x00000208U, a);

	for (++cnt; --cnt;) {
		u32 i = 0U;

		/* issue a read command, then poll the busy bit (0x100)
		 * for at most 1024 iterations
		 */
		aq_hw_write_reg(self, 0x00000200U, 0x00008000U);

		for (i = 1024U;
		     (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
		}

		*(p++) = aq_hw_read_reg(self, 0x0000020CU);
	}

	/* release the RAM semaphore */
	reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

err_exit:
	return err;
}
/* Write @cnt dwords from @p to firmware shared memory at FW address @a.
 *
 * Unlike the download path, this makes a single attempt to take the
 * HW_ATL_FW_SM_RAM semaphore (no polling loop) and fails with -ETIME
 * if it is not granted.  The semaphore is released on success.
 *
 * Uses the same mailbox window registers as the download path:
 *   0x208 = address, 0x20C = data in, 0x200 = command/busy.
 */
static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
					 u32 cnt)
{
	int err = 0;
	bool is_locked;

	is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
	if (!is_locked) {
		err = -ETIME;
		goto err_exit;
	}

	aq_hw_write_reg(self, 0x00000208U, a);

	for (++cnt; --cnt;) {
		u32 i = 0U;

		aq_hw_write_reg(self, 0x0000020CU, *(p++));
		/* 0xC000 = write command; poll busy bit (0x100), max 1024 */
		aq_hw_write_reg(self, 0x00000200U, 0xC000U);

		for (i = 1024U;
		     (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
		}
	}

	/* release the RAM semaphore */
	reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

err_exit:
	return err;
}
/* Check whether the running firmware version is compatible with the
 * one the driver expects.  The major number (top byte) must match
 * exactly; the actual minor/build part (low 24 bits) must be at least
 * the expected one.  Returns 0 when compatible, -EOPNOTSUPP otherwise.
 */
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
{
	const u32 dw_major_mask = 0xff000000U;
	const u32 dw_minor_mask = 0x00ffffffU;

	/* any difference in the major byte is a hard mismatch */
	if ((ver_expected ^ ver_actual) & dw_major_mask)
		return -EOPNOTSUPP;

	/* FW older than expected is not supported either */
	if ((ver_expected & dw_minor_mask) > (ver_actual & dw_minor_mask))
		return -EOPNOTSUPP;

	return 0;
}
/* Bring up the FW microprocessor interface.
 *
 * Seeds scratchpad HW_ATL_UCP_0X370_REG with a random pattern if it is
 * still zero (each byte forced non-zero via the 0x02 bits), clears
 * scratch 25, waits for the FW to publish its mailbox address at reg
 * 0x360 and finally validates the FW version read from reg 0x18.
 *
 * Fix: use the named constant HW_ATL_UCP_0X370_REG instead of the
 * duplicated magic literal 0x370U in the read.
 *
 * NOTE(review): AQ_HW_WAIT_FOR sets "err" on timeout, but "err" is
 * overwritten by the version check below, so a mailbox-address timeout
 * is silently ignored — confirm whether that is intended.
 */
static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
				 struct aq_hw_caps_s *aq_hw_caps)
{
	int err = 0;

	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int rnd = 0U;
		unsigned int ucp_0x370 = 0U;

		get_random_bytes(&rnd, sizeof(unsigned int));

		ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);

	/* check 10 times by 1ms */
	AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
			      aq_hw_read_reg(self, 0x360U)), 1000U, 10U);

	err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
				     aq_hw_read_reg(self, 0x18U));
	return err;
}
/* Scratch registers used for the driver <-> FW RPC handshake */
#define HW_ATL_RPC_CONTROL_ADR 0x0338U
#define HW_ATL_RPC_STATE_ADR 0x033CU

/* One RPC handshake word: a 16-bit transaction id and the 16-bit
 * payload length, overlaid on a single u32 for register access.
 */
struct aq_hw_atl_utils_fw_rpc_tid_s {
	union {
		u32 val;
		struct {
			u16 tid;
			u16 len;
		};
	};
};

/* RPC init is just a wait with no response buffer requested */
#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
/* Issue a firmware RPC: upload the request held in PHAL_ATLANTIC->rpc
 * and post the (tid, len) handshake word to the RPC control register.
 *
 * @rpc_size is in bytes and is rounded up to whole dwords for the
 * upload.  Requires a FW CPU (MIPS chip feature); returns -1 otherwise,
 * or a negative error from the upload path.
 */
static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
	int err = 0;
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;

	if (!IS_CHIP_FEATURE(MIPS)) {
		err = -1;
		goto err_exit;
	}

	err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
					    (u32 *)(void *)&PHAL_ATLANTIC->rpc,
					    (rpc_size + sizeof(u32) -
					     sizeof(u8)) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	/* fresh transaction id plus payload length, packed via the union */
	sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
	sw.len = (u16)rpc_size;
	aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);

err_exit:
	return err;
}
/* Wait for the firmware to acknowledge the current RPC transaction and
 * optionally download its response.
 *
 * Polls the CONTROL/STATE register pair until the FW-side tid matches
 * the driver-side tid.  A FW length of 0xFFFF triggers a re-issue of
 * the request via hw_atl_utils_fw_rpc_call() (NOTE(review): presumably
 * a "resend" indication — confirm against the FW protocol docs).
 *
 * When @rpc is non-NULL, the response payload (fw.len bytes, rounded
 * up to dwords) is downloaded into PHAL_ATLANTIC->rpc and *rpc is
 * pointed at it.  Returns 0 or a negative error code.
 */
static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
				    struct hw_aq_atl_utils_fw_rpc **rpc)
{
	int err = 0;
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;
	struct aq_hw_atl_utils_fw_rpc_tid_s fw;

	do {
		sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);

		PHAL_ATLANTIC->rpc_tid = sw.tid;

		/* comma expression: re-read fw.val on every poll, then
		 * compare tids; AQ_HW_WAIT_FOR sets "err" on timeout
		 */
		AQ_HW_WAIT_FOR(sw.tid ==
			       (fw.val =
				aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
				fw.tid), 1000U, 100U);
		if (err < 0)
			goto err_exit;

		if (fw.len == 0xFFFFU) {
			/* FW asked for the request again */
			err = hw_atl_utils_fw_rpc_call(self, sw.len);
			if (err < 0)
				goto err_exit;
		}
	} while (sw.tid != fw.tid || 0xFFFFU == fw.len);
	if (err < 0)
		goto err_exit;

	if (rpc) {
		if (fw.len) {
			err =
			hw_atl_utils_fw_downld_dwords(self,
						      PHAL_ATLANTIC->rpc_addr,
						      (u32 *)(void *)
						      &PHAL_ATLANTIC->rpc,
						      (fw.len + sizeof(u32) -
						       sizeof(u8)) /
						      sizeof(u32));
			if (err < 0)
				goto err_exit;
		}

		*rpc = &PHAL_ATLANTIC->rpc;
	}

err_exit:
	return err;
}
/* Initialize the FW message-passing interface: bring up the
 * microprocessor link, then perform an initial RPC handshake.
 * Returns 0 on success or a negative error code.
 */
static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
				   struct aq_hw_caps_s *aq_hw_caps)
{
	int err = hw_atl_utils_init_ucp(self, aq_hw_caps);

	if (err < 0)
		goto err_exit;

	err = hw_atl_utils_fw_rpc_init(self);

err_exit:
	return err;
}
/* Download the firmware statistics mailbox into @pmbox.
 *
 * Pulls sizeof(*pmbox) dwords from the FW mailbox address cached in
 * PHAL_ATLANTIC->mbox_addr.  On revision A0 the byte counters are not
 * supplied by FW, so they are approximated as packets * MTU and the
 * dropped-packet count comes from a software atomic; on later chips
 * dpc is read from a RX-DMA hardware counter.
 *
 * NOTE(review): the download already writes into @pmbox; the memcpy
 * below then copies the *cached* mbox over it when the caller passed
 * its own buffer — verify the intended copy direction.
 *
 * Errors are silently dropped (the function is void).
 */
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
				 struct hw_aq_atl_utils_mbox *pmbox)
{
	int err = 0;

	err = hw_atl_utils_fw_downld_dwords(self,
					    PHAL_ATLANTIC->mbox_addr,
					    (u32 *)(void *)pmbox,
					    sizeof(*pmbox) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	if (pmbox != &PHAL_ATLANTIC->mbox)
		memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));

	if (IS_CHIP_FEATURE(REVISION_A0)) {
		/* fall back to the default MTU when cfg is not attached yet */
		unsigned int mtu = self->aq_nic_cfg ?
					self->aq_nic_cfg->mtu : 1514U;
		pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
		pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
		pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
	} else {
		pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
	}

err_exit:;
}
/* Program the MPI control register with the requested link @speed
 * (upper 16 bits) and FW @state (lower bits).  Always returns 0.
 */
int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
			       enum hal_atl_utils_fw_state_e state)
{
	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR,
			(speed << HW_ATL_MPI_SPEED_SHIFT) | state);
	return 0;
}
/* Drive the FW MPI state machine to @state at link @speed.
 *
 * For MPI_RESET the function first waits for the FW mailbox
 * transaction_id to advance (proving FW is alive and processing)
 * before issuing the state change.
 *
 * NOTE(review): the function is void, so both a reset-wait timeout
 * (after the goto) and the return value of mpi_set_speed() are
 * discarded — callers never learn of a failure here.
 */
void hw_atl_utils_mpi_set(struct aq_hw_s *self,
			  enum hal_atl_utils_fw_state_e state, u32 speed)
{
	int err = 0;
	u32 transaction_id = 0;

	if (state == MPI_RESET) {
		hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);

		transaction_id = PHAL_ATLANTIC->mbox.transaction_id;

		/* comma expression re-reads the mailbox on each poll;
		 * AQ_HW_WAIT_FOR sets "err" on timeout
		 */
		AQ_HW_WAIT_FOR(transaction_id !=
			       (hw_atl_utils_mpi_read_stats
				(self, &PHAL_ATLANTIC->mbox),
				PHAL_ATLANTIC->mbox.transaction_id),
			       1000U, 100U);
		if (err < 0)
			goto err_exit;
	}

	err = hw_atl_utils_mpi_set_speed(self, speed, state);

err_exit:;
}
/* Translate the FW MPI state register into a link speed in Mbit/s.
 *
 * The speed field occupies the upper 16 bits of the state register;
 * zero or any unrecognized bit pattern reports link down (0 Mbps).
 * Always returns 0.
 */
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
				     struct aq_hw_link_status_s *link_status)
{
	u32 mpi_state = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
	u32 speed_bits = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;

	switch (speed_bits) {
	case HAL_ATLANTIC_RATE_10G:
		link_status->mbps = 10000U;
		break;

	case HAL_ATLANTIC_RATE_5G:
	case HAL_ATLANTIC_RATE_5GSR:
		link_status->mbps = 5000U;
		break;

	case HAL_ATLANTIC_RATE_2GS:
		link_status->mbps = 2500U;
		break;

	case HAL_ATLANTIC_RATE_1G:
		link_status->mbps = 1000U;
		break;

	case HAL_ATLANTIC_RATE_100M:
		link_status->mbps = 100U;
		break;

	default:
		/* covers both "no bits set" (link down) and unknown rates */
		link_status->mbps = 0U;
		break;
	}

	return 0;
}
/* Read the permanent MAC address into @mac.
 *
 * Also performs first-touch bring-up: maps MMIO, detects chip
 * features, and creates the FW interface.  The MAC bytes are
 * downloaded from the FW region pointed to by register 0x374, at an
 * offset of 40 dwords.  If the download fails, or the result is a
 * multicast address or has an all-zero OUI, a fallback address is
 * synthesized from fixed constants and the 0x370 scratch value.
 *
 * NOTE(review): the random-seed block for 0x370 below duplicates the
 * one in hw_atl_utils_init_ucp() (already run via mpi_create) — it
 * only fires if the register is still zero, but consider deduping.
 */
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
				   struct aq_hw_caps_s *aq_hw_caps,
				   u8 *mac)
{
	int err = 0;
	u32 h = 0U;
	u32 l = 0U;
	u32 mac_addr[2];

	self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);

	hw_atl_utils_hw_chip_features_init(self,
					   &PHAL_ATLANTIC_A0->chip_features);

	err = hw_atl_utils_mpi_create(self, aq_hw_caps);
	if (err < 0)
		goto err_exit;

	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int rnd = 0;
		unsigned int ucp_0x370 = 0;

		get_random_bytes(&rnd, sizeof(unsigned int));

		ucp_0x370 = 0x02020202 | (0xFEFEFE & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	err = hw_atl_utils_fw_downld_dwords(self,
					    aq_hw_read_reg(self, 0x00000374U) +
					    (40U * 4U),
					    mac_addr,
					    AQ_DIMOF(mac_addr));
	if (err < 0) {
		/* download failed: zero the address and let the fallback
		 * path below synthesize one (err is deliberately cleared)
		 */
		mac_addr[0] = 0U;
		mac_addr[1] = 0U;
		err = 0;
	} else {
		/* FW stores the address byte-swapped per dword */
		mac_addr[0] = __swab32(mac_addr[0]);
		mac_addr[1] = __swab32(mac_addr[1]);
	}

	ether_addr_copy(mac, (u8 *)mac_addr);

	/* multicast bit set or all-zero OUI -> address is unusable */
	if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
		/* chip revision */
		l = 0xE3000000U
			| (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG))
			| (0x00 << 16);
		h = 0x8001300EU;

		mac[5] = (u8)(0xFFU & l);
		l >>= 8;
		mac[4] = (u8)(0xFFU & l);
		l >>= 8;
		mac[3] = (u8)(0xFFU & l);
		l >>= 8;
		mac[2] = (u8)(0xFFU & l);
		mac[1] = (u8)(0xFFU & h);
		h >>= 8;
		mac[0] = (u8)(0xFFU & h);
	}

err_exit:
	return err;
}
/* Map a link rate in Mbit/s onto the hardware speed-index encoding.
 * Unknown rates map to index 0 (same as 10G), matching the original
 * switch default.
 */
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
{
	static const struct {
		unsigned int mbps;
		unsigned int index;
	} rate_map[] = {
		{ 100U, 5U },
		{ 1000U, 4U },
		{ 2500U, 3U },
		{ 5000U, 1U },
		{ 10000U, 0U },
	};
	unsigned int i;

	for (i = 0U; i < sizeof(rate_map) / sizeof(rate_map[0]); ++i) {
		if (rate_map[i].mbps == mbps)
			return rate_map[i].index;
	}

	return 0U;
}
/* Derive the chip feature bitmask from the MIF revision register and
 * store it in *@p.  Revision codes other than 1 (A0) and 2 (B0) leave
 * the mask empty.
 */
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
	u32 mif_rev = reg_glb_mif_id_get(self) & 0xFFU;
	u32 features = 0U;

	switch (mif_rev & 3U) {
	case 1U:	/* revision A0 */
		features = HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
			   HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			   HAL_ATLANTIC_UTILS_CHIP_MIPS;
		break;
	case 2U:	/* revision B0: adds TPO2/RPF2 blocks */
		features = HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
			   HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
			   HAL_ATLANTIC_UTILS_CHIP_MIPS |
			   HAL_ATLANTIC_UTILS_CHIP_TPO2 |
			   HAL_ATLANTIC_UTILS_CHIP_RPF2;
		break;
	default:
		break;
	}

	*p = features;
}
/* Shut down the FW interface (MPI_DEINIT at speed 0).  Always returns 0. */
int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
{
	hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
	return 0;
}
/* Enter the FW power-management state.  Always returns 0.
 *
 * NOTE(review): @power_state is currently ignored — MPI_POWER is
 * always requested at speed 0 regardless of the value passed in.
 */
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
			      unsigned int power_state)
{
	hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
	return 0;
}
/* Fill @data with the NIC statistics for ethtool.
 *
 * The entries are written in a fixed order (totals first, then the
 * unicast/multicast/broadcast breakdowns, byte counters, DMA good
 * packet/octet counters, and finally dropped packets).  This order
 * must stay in sync with the corresponding ethtool strings array
 * defined elsewhere in the driver.
 *
 * The final count of entries is stored via @p_count when non-NULL.
 * Always returns 0.
 */
int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
			      u64 *data, unsigned int *p_count)
{
	struct hw_atl_stats_s *stats = NULL;
	int i = 0;

	/* refresh the cached FW mailbox before reading counters */
	hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);

	stats = &PHAL_ATLANTIC->mbox.stats;

	data[i] = stats->uprc + stats->mprc + stats->bprc;	/* rx pkts */
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;	/* tx pkts */
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;	/* rx bytes */
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;	/* tx bytes */
	data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
	data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
	data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
	data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
	data[++i] = stats->dpc;

	if (p_count)
		*p_count = ++i;

	return 0;
}
/* MAC register addresses dumped by hw_atl_utils_hw_get_regs() for
 * ethtool -d.  88 entries; the caps' mac_regs_count must not exceed
 * this table's size.
 */
static const u32 hw_atl_utils_hw_mac_regs[] = {
	0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
	0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
	0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
	0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
	0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
	0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
	0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
	0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
	0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
	0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
	0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
	0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
	0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
	0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
	0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
	0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
	0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
	0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
	0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
	0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
	0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
	0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
};
/* Copy the MAC register window into @regs_buff (ethtool get_regs).
 *
 * NOTE(review): aq_hw_caps->mac_regs_count is trusted to be <= the
 * 88-entry hw_atl_utils_hw_mac_regs[] table; a larger value would
 * read past the table — confirm against the caps definitions.
 *
 * Always returns 0.
 */
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
			     struct aq_hw_caps_s *aq_hw_caps,
			     u32 *regs_buff)
{
	unsigned int i = 0U;

	for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
		regs_buff[i] = aq_hw_read_reg(self,
					      hw_atl_utils_hw_mac_regs[i]);
	return 0;
}
/* Report the FW version dword from register 0x18.  Always returns 0. */
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
{
	*fw_version = aq_hw_read_reg(self, 0x18U);
	return 0;
}
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
* abstraction layer.
*/
#ifndef HW_ATL_UTILS_H
#define HW_ATL_UTILS_H
#include "../aq_common.h"
#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
/* Counter block shared with firmware inside the mailbox; __packed with
 * fixed field order because the layout is FW ABI.
 *
 * Naming (as consumed by hw_atl_utils_get_hw_stats): u/m/b prefix =
 * unicast/multicast/broadcast, middle p/b = packets/bytes, suffix
 * rc/tc = received/transmitted count.
 */
struct __packed hw_atl_stats_s {
	u32 uprc;
	u32 mprc;
	u32 bprc;
	u32 erpt;
	u32 uptc;
	u32 mptc;
	u32 bptc;
	u32 erpr;
	u32 mbtc;
	u32 bbtc;
	u32 mbrc;
	u32 bbrc;
	u32 ubrc;
	u32 ubtc;
	u32 dpc;	/* dropped packets; kept in software on rev A0 */
};
/* 16-byte address slot: IPv6 uses the whole slot, IPv4 occupies the
 * last 4 bytes (right-aligned via 12 bytes of padding).
 */
union __packed ip_addr {
	struct {
		u8 addr[16];
	} v6;
	struct {
		u8 padding[12];
		u8 addr[4];
	} v4;
};
/* Firmware RPC request/response buffer; __packed, layout is FW ABI.
 * msg_id selects which union member is active (message ids presumably
 * defined on the FW side — not visible in this file).
 */
struct __packed hw_aq_atl_utils_fw_rpc {
	u32 msg_id;

	union {
		/* liveness check */
		struct {
			u32 pong;
		} msg_ping;

		/* offloaded ARP answering while host sleeps */
		struct {
			u8 mac_addr[6];
			u32 ip_addr_cnt;

			struct {
				union ip_addr addr;
				union ip_addr mask;
			} ip[1];
		} msg_arp;

		/* raw packet injection (max standard Ethernet frame) */
		struct {
			u32 len;
			u8 packet[1514U];
		} msg_inject;

		/* wake-on-LAN pattern programming */
		struct {
			u32 priority;
			u32 wol_packet_type;
			u16 friendly_name_len;
			u16 friendly_name[65];
			u32 pattern_id;
			u32 next_wol_pattern_offset;

			union {
				struct {
					u32 flags;
					u8 ipv4_source_address[4];
					u8 ipv4_dest_address[4];
					u16 tcp_source_port_number;
					u16 tcp_dest_port_number;
				} ipv4_tcp_syn_parameters;

				struct {
					u32 flags;
					u8 ipv6_source_address[16];
					u8 ipv6_dest_address[16];
					u16 tcp_source_port_number;
					u16 tcp_dest_port_number;
				} ipv6_tcp_syn_parameters;

				struct {
					u32 flags;
				} eapol_request_id_message_parameters;

				struct {
					u32 flags;
					u32 mask_offset;
					u32 mask_size;
					u32 pattern_offset;
					u32 pattern_size;
				} wol_bit_map_pattern;
			} wol_pattern;
		} msg_wol;

		/* wake on link state change */
		struct {
			u32 is_wake_on_link_down;
			u32 is_wake_on_link_up;
		} msg_wolink;
	};
};
/* FW mailbox image, downloaded wholesale (as dwords) by
 * hw_atl_utils_mpi_read_stats(); __packed, layout is FW ABI.
 * transaction_id advances when FW refreshes the mailbox and is used
 * as a liveness signal by hw_atl_utils_mpi_set().
 */
struct __packed hw_aq_atl_utils_mbox {
	u32 version;
	u32 transaction_id;
	int error;
	struct hw_atl_stats_s stats;
};
/* Driver-private HAL state.  struct aq_hw_s must stay the FIRST
 * member: the SELF/PHAL_ATLANTIC* macros cast the generic aq_hw_s
 * pointer to this type to reach the private fields below.
 */
struct __packed hw_atl_s {
	struct aq_hw_s base;
	struct hw_aq_atl_utils_mbox mbox;	/* cached FW mailbox copy */
	u64 speed;
	u32 itr_tx;
	u32 itr_rx;
	unsigned int chip_features;	/* HAL_ATLANTIC_UTILS_CHIP_* bits */
	u32 fw_ver_actual;
	atomic_t dpc;		/* software dropped-packet counter (rev A0) */
	u32 mbox_addr;		/* FW mailbox address, read from reg 0x360 */
	u32 rpc_addr;		/* FW RPC buffer address */
	u32 rpc_tid;		/* current RPC transaction id */
	struct hw_aq_atl_utils_fw_rpc rpc;	/* RPC request/response buf */
};
#define SELF ((struct hw_atl_s *)self)
#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
PHAL_ATLANTIC->chip_features)
enum hal_atl_utils_fw_state_e {
MPI_DEINIT = 0,
MPI_RESET = 1,
MPI_INIT = 2,
MPI_POWER = 4,
};
#define HAL_ATLANTIC_RATE_10G BIT(0)
#define HAL_ATLANTIC_RATE_5G BIT(1)
#define HAL_ATLANTIC_RATE_5GSR BIT(2)
#define HAL_ATLANTIC_RATE_2GS BIT(3)
#define HAL_ATLANTIC_RATE_1G BIT(4)
#define HAL_ATLANTIC_RATE_100M BIT(5)
#define HAL_ATLANTIC_RATE_INVALID BIT(6)
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox *pmbox);
void hw_atl_utils_mpi_set(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state,
u32 speed);
int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
enum hal_atl_utils_fw_state_e state);
int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
struct aq_hw_link_status_s *link_status);
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
u8 *mac);
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
struct ethtool_cmd *cmd);
int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
unsigned int power_state);
int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
u64 *data,
unsigned int *p_count);
#endif /* HW_ATL_UTILS_H */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
#ifndef VER_H
#define VER_H
#define NIC_MAJOR_DRIVER_VERSION 1
#define NIC_MINOR_DRIVER_VERSION 5
#define NIC_BUILD_DRIVER_VERSION 345
#define NIC_REVISION_DRIVER_VERSION 0
#endif /* VER_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment