Commit b79f91f1 authored by David S. Miller

Merge branch 'net-atlantic-QoS-implementation'

Igor Russkikh says:

====================
net: atlantic: QoS implementation

This patch series adds support for mqprio rate limiters and multi-TC:
 * max_rate is supported on both A1 and A2;
 * min_rate is supported on A2 only;
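
Rate limits are requested through the mqprio qdisc's bw_rlimit shaper
parameters; they reach the driver as the TC_MQPRIO_F_MIN_RATE /
TC_MQPRIO_F_MAX_RATE offload flags handled in aq_ndo_setup_tc() below.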

This is joint work by Mark and Dmitry.

To implement this feature, a number of rearrangements and code
improvements were made in the areas of TC/ring management, allocation
control, etc.

One of the problems we faced was the conflicting PTP functionality,
which consumes a whole traffic class due to hardware limitations.
The patches below describe in more detail how PTP and multi-TC
co-exist right now.

v2:
 * accommodated review comments (-Wmissing-prototypes and
   -Wunused-but-set-variable findings);
 * added user notification in case of conflicting multi-TC<->PTP
   configuration;
 * added automatic PTP disabling, if a conflicting configuration is
   detected;
 * removed module param, which was used for PTP disabling in v1;

v1: https://patchwork.ozlabs.org/cover/1294380/
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 59b8d277 40f05e5b
......@@ -88,13 +88,13 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InDroppedDma",
};
static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InPackets",
"Queue[%d] OutPackets",
"Queue[%d] Restarts",
"Queue[%d] InJumboPackets",
"Queue[%d] InLroPackets",
"Queue[%d] InErrors",
static const char * const aq_ethtool_queue_stat_names[] = {
"%sQueue[%d] InPackets",
"%sQueue[%d] OutPackets",
"%sQueue[%d] Restarts",
"%sQueue[%d] InJumboPackets",
"%sQueue[%d] InLroPackets",
"%sQueue[%d] InErrors",
};
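/* With multi-TC enabled the leading "%s" receives a "TC<n> " prefix,
 * otherwise it collapses to an empty string.  A minimal sketch of the
 * strings produced (illustrative values only):
 *
 *   char name[ETH_GSTRING_LEN];
 *   snprintf(name, sizeof(name), "%sQueue[%d] InPackets", "TC1 ", 5);
 *   //-> "TC1 Queue[5] InPackets"   (cfg->is_qos == true)
 *   snprintf(name, sizeof(name), "%sQueue[%d] InPackets", "", 5);
 *   //-> "Queue[5] InPackets"       (single TC)
 */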
#if IS_ENABLED(CONFIG_MACSEC)
......@@ -166,7 +166,8 @@ static u32 aq_ethtool_n_stats(struct net_device *ndev)
struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs;
ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs *
cfg->tcs;
#if IS_ENABLED(CONFIG_MACSEC)
if (nic->macsec_cfg) {
......@@ -223,7 +224,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
u8 *p = data;
int i, si;
......@@ -231,24 +232,35 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
int sa;
#endif
cfg = aq_nic_get_cfg(aq_nic);
cfg = aq_nic_get_cfg(nic);
switch (stringset) {
case ETH_SS_STATS:
case ETH_SS_STATS: {
const int stat_cnt = ARRAY_SIZE(aq_ethtool_queue_stat_names);
char tc_string[8];
int tc;
memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0;
si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_ethtool_queue_stat_names[si], i);
p += ETH_GSTRING_LEN;
for (tc = 0; tc < cfg->tcs; tc++) {
if (cfg->is_qos)
snprintf(tc_string, 8, "TC%d ", tc);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0; si < stat_cnt; si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_ethtool_queue_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
p += ETH_GSTRING_LEN;
}
}
}
#if IS_ENABLED(CONFIG_MACSEC)
if (!aq_nic->macsec_cfg)
if (!nic->macsec_cfg)
break;
memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names));
......@@ -256,7 +268,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_txsc *aq_txsc;
if (!(test_bit(i, &aq_nic->macsec_cfg->txsc_idx_busy)))
if (!(test_bit(i, &nic->macsec_cfg->txsc_idx_busy)))
continue;
for (si = 0;
......@@ -266,7 +278,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
aq_macsec_txsc_stat_names[si], i);
p += ETH_GSTRING_LEN;
}
aq_txsc = &aq_nic->macsec_cfg->aq_txsc[i];
aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy)))
continue;
......@@ -283,10 +295,10 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_rxsc *aq_rxsc;
if (!(test_bit(i, &aq_nic->macsec_cfg->rxsc_idx_busy)))
if (!(test_bit(i, &nic->macsec_cfg->rxsc_idx_busy)))
continue;
aq_rxsc = &aq_nic->macsec_cfg->aq_rxsc[i];
aq_rxsc = &nic->macsec_cfg->aq_rxsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy)))
continue;
......@@ -302,6 +314,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
}
#endif
break;
}
case ETH_SS_PRIV_FLAGS:
memcpy(p, aq_ethtool_priv_flag_names,
sizeof(aq_ethtool_priv_flag_names));
......@@ -780,8 +793,6 @@ static int aq_set_ringparam(struct net_device *ndev,
dev_close(ndev);
}
aq_nic_free_vectors(aq_nic);
cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
......@@ -790,15 +801,10 @@ static int aq_set_ringparam(struct net_device *ndev,
cfg->txds = min(cfg->txds, hw_caps->txds_max);
cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs;
aq_nic->aq_vecs++) {
aq_nic->aq_vec[aq_nic->aq_vecs] =
aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg);
if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
err = -ENOMEM;
goto err_exit;
}
}
err = aq_nic_realloc_vectors(aq_nic);
if (err)
goto err_exit;
if (ndev_running)
err = dev_open(ndev, NULL);
......
......@@ -153,6 +153,8 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
fsp->location > AQ_RX_LAST_LOC_FVLANID) {
netdev_err(aq_nic->ndev,
......@@ -170,10 +172,10 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
return -EINVAL;
}
if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
if (fsp->ring_cookie > cfg->num_rss_queues * cfg->tcs) {
netdev_err(aq_nic->ndev,
"ethtool: queue number must be in range [0, %d]",
aq_nic->aq_nic_cfg.num_rss_queues - 1);
cfg->num_rss_queues * cfg->tcs - 1);
return -EINVAL;
}
return 0;
......@@ -262,6 +264,7 @@ static bool __must_check
aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
bool rule_is_not_correct = false;
if (!aq_nic) {
......@@ -274,11 +277,11 @@ aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
} else if (aq_check_filter(aq_nic, fsp)) {
rule_is_not_correct = true;
} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
netdev_err(aq_nic->ndev,
"ethtool: The specified action is invalid.\n"
"Maximum allowable value action is %u.\n",
aq_nic->aq_nic_cfg.num_rss_queues - 1);
cfg->num_rss_queues * cfg->tcs - 1);
rule_is_not_correct = true;
}
}
......
......@@ -18,6 +18,12 @@
#define AQ_HW_MAC_COUNTER_HZ 312500000ll
#define AQ_HW_PHY_COUNTER_HZ 160000000ll
enum aq_tc_mode {
AQ_TC_MODE_INVALID = -1,
AQ_TC_MODE_8TCS,
AQ_TC_MODE_4TCS,
};
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
......@@ -29,6 +35,9 @@
(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
/* Used for rate to Mbps conversion */
#define AQ_MBPS_DIVISOR 125000 /* 1000000 / 8 */
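/* Worked example of the conversion (mqprio rates arrive in bytes/s):
 * a 1 Gbit/s limit is 125,000,000 bytes/s, and
 * 125000000 / AQ_MBPS_DIVISOR == 125000000 / 125000 == 1000 Mbps.
 */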
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
......@@ -46,7 +55,7 @@ struct aq_hw_caps_s {
u32 mac_regs_count;
u32 hw_alive_check_addr;
u8 msix_irqs;
u8 tcs;
u8 tcs_max;
u8 rxd_alignment;
u8 rxd_size;
u8 txd_alignment;
......@@ -118,8 +127,11 @@ struct aq_stats_s {
#define AQ_HW_TXD_MULTIPLE 8U
#define AQ_HW_RXD_MULTIPLE 8U
#define AQ_HW_QUEUES_MAX 32U
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
#define AQ_HW_PTP_TC 2U
#define AQ_HW_LED_BLINK 0x2U
#define AQ_HW_LED_DEFAULT 0x0U
......@@ -268,6 +280,8 @@ struct aq_hw_ops {
int (*hw_rss_hash_set)(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
int (*hw_tc_rate_limit_set)(struct aq_hw_s *self);
int (*hw_get_regs)(struct aq_hw_s *self,
const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
......@@ -279,10 +293,6 @@ struct aq_hw_ops {
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
......
......@@ -79,3 +79,29 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
err_exit:
return err;
}
int aq_hw_num_tcs(struct aq_hw_s *hw)
{
switch (hw->aq_nic_cfg->tc_mode) {
case AQ_TC_MODE_8TCS:
return 8;
case AQ_TC_MODE_4TCS:
return 4;
default:
break;
}
return 1;
}
int aq_hw_q_per_tc(struct aq_hw_s *hw)
{
switch (hw->aq_nic_cfg->tc_mode) {
case AQ_TC_MODE_8TCS:
return 4;
case AQ_TC_MODE_4TCS:
return 8;
default:
return 4;
}
}
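/* Note that the two modes are complementary: 8 TCs x 4 queues and
 * 4 TCs x 8 queues both cover all AQ_HW_QUEUES_MAX (32) hardware queues.
 */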
......@@ -34,5 +34,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
#endif /* AQ_HW_UTILS_H */
......@@ -478,7 +478,7 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
set_bit(txsc_idx, &cfg->txsc_idx_busy);
return 0;
return ret;
}
static int aq_mdo_upd_secy(struct macsec_context *ctx)
......
......@@ -12,11 +12,13 @@
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
......@@ -38,7 +40,7 @@ struct net_device *aq_ndev_alloc(void)
struct net_device *ndev = NULL;
struct aq_nic_s *aq_nic = NULL;
ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
if (!ndev)
return NULL;
......@@ -330,6 +332,73 @@ static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
return 0;
}
static int aq_validate_mqprio_opt(struct aq_nic_s *self,
struct tc_mqprio_qopt_offload *mqprio,
const unsigned int num_tc)
{
const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
AQ_CFG_TCS_MAX);
if (num_tc > tcs_max) {
netdev_err(self->ndev, "Too many TCs requested\n");
return -EOPNOTSUPP;
}
if (num_tc != 0 && !is_power_of_2(num_tc)) {
netdev_err(self->ndev, "TC count should be power of 2\n");
return -EOPNOTSUPP;
}
if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
netdev_err(self->ndev, "Min tx rate is not supported\n");
return -EOPNOTSUPP;
}
return 0;
}
static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio = type_data;
struct aq_nic_s *aq_nic = netdev_priv(dev);
bool has_min_rate;
bool has_max_rate;
int err;
int i;
if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
if (err)
return err;
for (i = 0; i < mqprio->qopt.num_tc; i++) {
if (has_max_rate) {
u64 max_rate = mqprio->max_rate[i];
do_div(max_rate, AQ_MBPS_DIVISOR);
aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
}
if (has_min_rate) {
u64 min_rate = mqprio->min_rate[i];
do_div(min_rate, AQ_MBPS_DIVISOR);
aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
}
}
return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
mqprio->qopt.prio_tc_map);
}
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
......@@ -341,6 +410,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_do_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
};
static int __init aq_ndev_init_module(void)
......
......@@ -26,6 +26,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <net/pkt_cls.h>
static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
......@@ -64,10 +65,38 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
/* Recalculate the number of vectors */
static void aq_nic_cfg_update_num_vecs(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
if (self->irqvecs > AQ_HW_SERVICE_IRQS)
cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
/* cfg->vecs should be power of 2 for RSS */
cfg->vecs = rounddown_pow_of_two(cfg->vecs);
if (ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
if (cfg->tcs > 2)
cfg->vecs = min(cfg->vecs, 4U);
}
if (cfg->vecs <= 4)
cfg->tc_mode = AQ_TC_MODE_8TCS;
else
cfg->tc_mode = AQ_TC_MODE_4TCS;
/* rss rings */
cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
aq_nic_rss_init(self, cfg->num_rss_queues);
}
/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
int i;
cfg->tcs = AQ_CFG_TCS_DEF;
......@@ -79,7 +108,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
cfg->fc.req = AQ_CFG_FC_MODE;
cfg->wol = AQ_CFG_WOL_MODES;
......@@ -89,29 +117,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
cfg->is_ptp = true;
/* descriptors */
cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
/* rss rings */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
if (self->irqvecs > AQ_HW_SERVICE_IRQS)
cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
/* cfg->vecs should be power of 2 for RSS */
if (cfg->vecs >= 8U)
cfg->vecs = 8U;
else if (cfg->vecs >= 4U)
cfg->vecs = 4U;
else if (cfg->vecs >= 2U)
cfg->vecs = 2U;
else
cfg->vecs = 1U;
cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
aq_nic_rss_init(self, cfg->num_rss_queues);
aq_nic_cfg_update_num_vecs(self);
cfg->irq_type = aq_pci_func_get_irq_type(self);
......@@ -136,6 +148,9 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
cfg->is_vlan_force_promisc = true;
for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
cfg->prio_tc_map[i] = cfg->tcs * i / 8;
}
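/* A worked example of the default 802.1p priority -> TC mapping above,
 * assuming cfg->tcs == 4:
 *   prio_tc_map[i] = 4 * i / 8  ->  {0, 0, 1, 1, 2, 2, 3, 3}
 * i.e. each pair of priorities shares one TC.
 */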
static int aq_nic_update_link_status(struct aq_nic_s *self)
......@@ -181,6 +196,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
#if IS_ENABLED(CONFIG_MACSEC)
aq_macsec_enable(self);
#endif
if (self->aq_hw_ops->hw_tc_rate_limit_set)
self->aq_hw_ops->hw_tc_rate_limit_set(self->aq_hw);
netif_tx_wake_all_queues(self->ndev);
}
if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
......@@ -399,21 +417,29 @@ int aq_nic_init(struct aq_nic_s *self)
err = aq_phy_init(self->aq_hw);
}
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
for (i = 0U; i < self->aq_vecs; i++) {
aq_vec = self->aq_vec[i];
err = aq_vec_ring_alloc(aq_vec, self, i,
aq_nic_get_cfg(self));
if (err)
goto err_exit;
aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);
}
err = aq_ptp_init(self, self->irqvecs - 1);
if (err < 0)
goto err_exit;
if (aq_nic_get_cfg(self)->is_ptp) {
err = aq_ptp_init(self, self->irqvecs - 1);
if (err < 0)
goto err_exit;
err = aq_ptp_ring_alloc(self);
if (err < 0)
goto err_exit;
err = aq_ptp_ring_alloc(self);
if (err < 0)
goto err_exit;
err = aq_ptp_ring_init(self);
if (err < 0)
goto err_exit;
err = aq_ptp_ring_init(self);
if (err < 0)
goto err_exit;
}
netif_carrier_off(self->ndev);
......@@ -424,9 +450,12 @@ int aq_nic_init(struct aq_nic_s *self)
int aq_nic_start(struct aq_nic_s *self)
{
struct aq_vec_s *aq_vec = NULL;
struct aq_nic_cfg_s *cfg;
unsigned int i = 0U;
int err = 0;
cfg = aq_nic_get_cfg(self);
err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
self->mc_list.ar,
self->mc_list.count);
......@@ -464,7 +493,7 @@ int aq_nic_start(struct aq_nic_s *self)
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
aq_nic_service_timer_cb(&self->service_timer);
if (self->aq_nic_cfg.is_polling) {
if (cfg->is_polling) {
timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
......@@ -482,16 +511,16 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
if (self->aq_nic_cfg.link_irq_vec) {
if (cfg->link_irq_vec) {
int irqvec = pci_irq_vector(self->pdev,
self->aq_nic_cfg.link_irq_vec);
cfg->link_irq_vec);
err = request_threaded_irq(irqvec, NULL,
aq_linkstate_threaded_isr,
IRQF_SHARED | IRQF_ONESHOT,
self->ndev->name, self);
if (err < 0)
goto err_exit;
self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
self->msix_entry_mask |= (1 << cfg->link_irq_vec);
}
err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
......@@ -500,14 +529,21 @@ int aq_nic_start(struct aq_nic_s *self)
goto err_exit;
}
err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
err = netif_set_real_num_tx_queues(self->ndev,
self->aq_vecs * cfg->tcs);
if (err < 0)
goto err_exit;
err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
err = netif_set_real_num_rx_queues(self->ndev,
self->aq_vecs * cfg->tcs);
if (err < 0)
goto err_exit;
for (i = 0; i < cfg->tcs; i++) {
u16 offset = self->aq_vecs * i;
netdev_set_tc_queue(self->ndev, i, self->aq_vecs, offset);
}
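/* E.g. with aq_vecs == 4 and cfg->tcs == 4, the netdev queues are laid
 * out contiguously per TC: TC0 -> queues 0-3, TC1 -> 4-7, TC2 -> 8-11,
 * TC3 -> 12-15.
 */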
netif_tx_start_all_queues(self->ndev);
err_exit:
......@@ -518,6 +554,8 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
struct device *dev = aq_nic_get_dev(self);
struct aq_ring_buff_s *first = NULL;
u8 ipver = ip_hdr(skb)->version;
struct aq_ring_buff_s *dx_buff;
......@@ -559,7 +597,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
need_context_tag = true;
}
if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
if (cfg->is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
dx_buff->len_pkt = skb->len;
dx_buff->is_vlan = 1U;
......@@ -574,12 +612,12 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
}
dx_buff->len = skb_headlen(skb);
dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
dx_buff->pa = dma_map_single(dev,
skb->data,
dx_buff->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
if (unlikely(dma_mapping_error(dev, dx_buff->pa))) {
ret = 0;
goto exit;
}
......@@ -611,13 +649,13 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
else
buff_size = frag_len;
frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
frag_pa = skb_frag_dma_map(dev,
frag,
buff_offset,
buff_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
if (unlikely(dma_mapping_error(dev,
frag_pa)))
goto mapping_error;
......@@ -651,12 +689,12 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
!dx_buff->is_vlan && dx_buff->pa) {
if (unlikely(dx_buff->is_sop)) {
dma_unmap_single(aq_nic_get_dev(self),
dma_unmap_single(dev,
dx_buff->pa,
dx_buff->len,
DMA_TO_DEVICE);
} else {
dma_unmap_page(aq_nic_get_dev(self),
dma_unmap_page(dev,
dx_buff->pa,
dx_buff->len,
DMA_TO_DEVICE);
......@@ -670,15 +708,16 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
unsigned int vec = skb->queue_mapping % cfg->vecs;
unsigned int tc = skb->queue_mapping / cfg->vecs;
struct aq_ring_s *ring = NULL;
unsigned int frags = 0U;
int err = NETDEV_TX_OK;
unsigned int tc = 0U;
frags = skb_shinfo(skb)->nr_frags + 1;
ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
ring = self->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(cfg, tc, vec)];
if (frags > AQ_CFG_SKB_FRAGS_MAX) {
dev_kfree_skb_any(skb);
......@@ -687,13 +726,14 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
aq_ring_update_queue_state(ring);
if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
if (cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
/* Above status update may stop the queue. Check this. */
if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
if (__netif_subqueue_stopped(self->ndev,
AQ_NIC_RING2QMAP(self, ring->idx))) {
err = NETDEV_TX_BUSY;
goto err_exit;
}
......@@ -823,6 +863,7 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
unsigned int tc;
if (self->aq_fw_ops->update_stats) {
mutex_lock(&self->fwreq_mutex);
......@@ -861,10 +902,13 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
for (i = 0U, aq_vec = self->aq_vec[0];
aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
data += count;
aq_vec_get_sw_stats(aq_vec, data, &count);
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
for (i = 0U, aq_vec = self->aq_vec[0];
aq_vec && self->aq_vecs > i;
++i, aq_vec = self->aq_vec[i]) {
data += count;
aq_vec_get_sw_stats(aq_vec, tc, data, &count);
}
}
data += count;
......@@ -1145,9 +1189,11 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
if (!self)
goto err_exit;
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
for (i = 0U; i < self->aq_vecs; i++) {
aq_vec = self->aq_vec[i];
aq_vec_deinit(aq_vec);
aq_vec_ring_free(aq_vec);
}
aq_ptp_unregister(self);
aq_ptp_ring_deinit(self);
......@@ -1180,6 +1226,22 @@ void aq_nic_free_vectors(struct aq_nic_s *self)
err_exit:;
}
int aq_nic_realloc_vectors(struct aq_nic_s *self)
{
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
aq_nic_free_vectors(self);
for (self->aq_vecs = 0; self->aq_vecs < cfg->vecs; self->aq_vecs++) {
self->aq_vec[self->aq_vecs] = aq_vec_alloc(self, self->aq_vecs,
cfg);
if (unlikely(!self->aq_vec[self->aq_vecs]))
return -ENOMEM;
}
return 0;
}
void aq_nic_shutdown(struct aq_nic_s *self)
{
int err = 0;
......@@ -1245,3 +1307,98 @@ void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
break;
}
}
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
const unsigned int prev_vecs = cfg->vecs;
bool ndev_running;
int err = 0;
int i;
/* if the requested configuration is already set, or this is a
 * disable request (tcs is 0) and QoS is already disabled
 */
if (tcs == cfg->tcs || (tcs == 0 && !cfg->is_qos))
return 0;
ndev_running = netif_running(self->ndev);
if (ndev_running)
dev_close(self->ndev);
cfg->tcs = tcs;
if (cfg->tcs == 0)
cfg->tcs = 1;
if (prio_tc_map)
memcpy(cfg->prio_tc_map, prio_tc_map, sizeof(cfg->prio_tc_map));
else
for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
cfg->prio_tc_map[i] = cfg->tcs * i / 8;
cfg->is_qos = (tcs != 0 ? true : false);
cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
if (!cfg->is_ptp)
netdev_warn(self->ndev, "%s\n",
"PTP is auto disabled due to requested TC count.");
netdev_set_num_tc(self->ndev, cfg->tcs);
/* Changing the number of TCs might change the number of vectors */
aq_nic_cfg_update_num_vecs(self);
if (prev_vecs != cfg->vecs) {
err = aq_nic_realloc_vectors(self);
if (err)
goto err_exit;
}
if (ndev_running)
err = dev_open(self->ndev, NULL);
err_exit:
return err;
}
int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
const u32 max_rate)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
if (tc >= AQ_CFG_TCS_MAX)
return -EINVAL;
if (max_rate && max_rate < 10) {
netdev_warn(self->ndev,
"Setting %s to the minimum usable value of %dMbps.\n",
"max rate", 10);
cfg->tc_max_rate[tc] = 10;
} else {
cfg->tc_max_rate[tc] = max_rate;
}
return 0;
}
int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
const u32 min_rate)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
if (tc >= AQ_CFG_TCS_MAX)
return -EINVAL;
if (min_rate)
set_bit(tc, &cfg->tc_min_rate_msk);
else
clear_bit(tc, &cfg->tc_min_rate_msk);
if (min_rate && min_rate < 20) {
netdev_warn(self->ndev,
"Setting %s to the minimum usable value of %dMbps.\n",
"min rate", 20);
cfg->tc_min_rate[tc] = 20;
} else {
cfg->tc_min_rate[tc] = min_rate;
}
return 0;
}
......@@ -59,8 +59,15 @@ struct aq_nic_cfg_s {
bool is_polling;
bool is_rss;
bool is_lro;
bool is_qos;
bool is_ptp;
enum aq_tc_mode tc_mode;
u32 priv_flags;
u8 tcs;
u8 prio_tc_map[8];
u32 tc_max_rate[AQ_CFG_TCS_MAX];
unsigned long tc_min_rate_msk;
u32 tc_min_rate[AQ_CFG_TCS_MAX];
struct aq_rss_parameters aq_rss;
u32 eee_speeds;
};
......@@ -77,8 +84,16 @@ struct aq_nic_cfg_s {
#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
#define AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) \
(((_NIC_CFG_)->tc_mode == AQ_TC_MODE_4TCS) ? 8 : 4)
#define AQ_NIC_CFG_TCVEC2RING(_NIC_CFG_, _TC_, _VEC_) \
((_TC_) * AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) + (_VEC_))
#define AQ_NIC_RING2QMAP(_NIC_, _ID_) \
((_ID_) / AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg) * \
(_NIC_)->aq_vecs + \
((_ID_) % AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg)))
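/* Worked example of the ring -> netdev queue mapping, assuming 4-TC
 * mode (8 rings per TC) and aq_vecs == 4: hardware ring 9 belongs to
 * TC 1, vector 1, so
 *   AQ_NIC_RING2QMAP(nic, 9) == 9 / 8 * 4 + 9 % 8 == 5
 * which matches the per-TC queue layout set up in aq_nic_start().
 */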
struct aq_hw_rx_fl2 {
struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
......@@ -104,7 +119,7 @@ struct aq_nic_s {
atomic_t flags;
u32 msg_enable;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
struct net_device *ndev;
unsigned int aq_vecs;
......@@ -164,6 +179,7 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
void aq_nic_set_power(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_realloc_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
......@@ -181,4 +197,9 @@ void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
u32 location);
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map);
int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
const u32 max_rate);
int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
const u32 min_rate);
#endif /* AQ_NIC_H */
......@@ -431,6 +431,9 @@ static int atl_resume_common(struct device *dev, bool deep)
netif_tx_start_all_queues(nic->ndev);
err_exit:
if (ret < 0)
aq_nic_deinit(nic, true);
rtnl_unlock();
return ret;
......
......@@ -945,26 +945,29 @@ void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
#define PTP_4TC_RING_IDX 16
#define PTP_HWST_RING_IDX 31
/* Index must be 8 (8 TCs) or 16 (4 TCs).
* It depends on Traffic Class mode.
*/
static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode)
{
if (tc_mode == AQ_TC_MODE_8TCS)
return PTP_8TC_RING_IDX;
return PTP_4TC_RING_IDX;
}
int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
unsigned int tx_ring_idx, rx_ring_idx;
struct aq_ring_s *hwts;
u32 tx_tc_mode, rx_tc_mode;
struct aq_ring_s *ring;
int err;
if (!aq_ptp)
return 0;
/* Index must to be 8 (8 TCs) or 16 (4 TCs).
* It depends from Traffic Class mode.
*/
aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
if (tx_tc_mode == 0)
tx_ring_idx = PTP_8TC_RING_IDX;
else
tx_ring_idx = PTP_4TC_RING_IDX;
tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
tx_ring_idx, &aq_nic->aq_nic_cfg);
......@@ -973,11 +976,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
goto err_exit;
}
aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
if (rx_tc_mode == 0)
rx_ring_idx = PTP_8TC_RING_IDX;
else
rx_ring_idx = PTP_4TC_RING_IDX;
rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
rx_ring_idx, &aq_nic->aq_nic_cfg);
......
......@@ -232,8 +232,11 @@ void aq_ring_queue_wake(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
if (__netif_subqueue_stopped(ndev, ring->idx)) {
netif_wake_subqueue(ndev, ring->idx);
if (__netif_subqueue_stopped(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic,
ring->idx))) {
netif_wake_subqueue(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
ring->stats.tx.queue_restarts++;
}
}
......@@ -242,8 +245,11 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
if (!__netif_subqueue_stopped(ndev, ring->idx))
netif_stop_subqueue(ndev, ring->idx);
if (!__netif_subqueue_stopped(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic,
ring->idx)))
netif_stop_subqueue(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}
bool aq_ring_tx_clean(struct aq_ring_s *self)
......@@ -466,7 +472,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
/* Send all PTP traffic to queue 0 */
skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);
skb_record_rx_queue(skb,
is_ptp_ring ? 0
: AQ_NIC_RING2QMAP(self->aq_nic,
self->idx));
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
......
......@@ -103,16 +103,11 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
struct aq_ring_s *ring = NULL;
struct aq_vec_s *self = NULL;
unsigned int i = 0U;
int err = 0;
self = kzalloc(sizeof(*self), GFP_KERNEL);
if (!self) {
err = -ENOMEM;
if (!self)
goto err_exit;
}
self->aq_nic = aq_nic;
self->aq_ring_param.vec_idx = idx;
......@@ -128,10 +123,20 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
err_exit:
return self;
}
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
{
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
int err = 0;
for (i = 0; i < aq_nic_cfg->tcs; ++i) {
unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic,
self->tx_rings,
self->aq_ring_param.vec_idx);
const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
i, idx);
ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
idx_ring, aq_nic_cfg);
......@@ -156,11 +161,11 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
err_exit:
if (err < 0) {
aq_vec_free(self);
aq_vec_ring_free(self);
self = NULL;
}
return self;
return err;
}
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
......@@ -269,6 +274,18 @@ err_exit:;
}
void aq_vec_free(struct aq_vec_s *self)
{
if (!self)
goto err_exit;
netif_napi_del(&self->napi);
kfree(self);
err_exit:;
}
void aq_vec_ring_free(struct aq_vec_s *self)
{
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
......@@ -279,13 +296,12 @@ void aq_vec_free(struct aq_vec_s *self)
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
aq_ring_free(&ring[AQ_VEC_TX_ID]);
aq_ring_free(&ring[AQ_VEC_RX_ID]);
if (i < self->rx_rings)
aq_ring_free(&ring[AQ_VEC_RX_ID]);
}
netif_napi_del(&self->napi);
kfree(self);
self->tx_rings = 0;
self->rx_rings = 0;
err_exit:;
}
......@@ -333,16 +349,14 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
return &self->aq_ring_param.affinity_mask;
}
void aq_vec_add_stats(struct aq_vec_s *self,
struct aq_ring_stats_rx_s *stats_rx,
struct aq_ring_stats_tx_s *stats_tx)
static void aq_vec_add_stats(struct aq_vec_s *self,
const unsigned int tc,
struct aq_ring_stats_rx_s *stats_rx,
struct aq_ring_stats_tx_s *stats_tx)
{
struct aq_ring_s *ring = NULL;
unsigned int r = 0U;
struct aq_ring_s *ring = self->ring[tc];
for (r = 0U, ring = self->ring[0];
self->tx_rings > r; ++r, ring = self->ring[r]) {
struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
if (tc < self->rx_rings) {
struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
stats_rx->packets += rx->packets;
......@@ -353,6 +367,10 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_rx->pg_losts += rx->pg_losts;
stats_rx->pg_flips += rx->pg_flips;
stats_rx->pg_reuses += rx->pg_reuses;
}
if (tc < self->tx_rings) {
struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
......@@ -361,7 +379,8 @@ void aq_vec_add_stats(struct aq_vec_s *self,
}
}
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
unsigned int *p_count)
{
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
......@@ -369,7 +388,8 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
aq_vec_add_stats(self, &stats_rx, &stats_tx);
aq_vec_add_stats(self, tc, &stats_rx, &stats_tx);
/* This data should mimic aq_ethtool_queue_stat_names structure
*/
......
......@@ -25,17 +25,17 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg);
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
void aq_vec_ring_free(struct aq_vec_s *self);
int aq_vec_start(struct aq_vec_s *self);
void aq_vec_stop(struct aq_vec_s *self);
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data,
int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
unsigned int *p_count);
void aq_vec_add_stats(struct aq_vec_s *self,
struct aq_ring_stats_rx_s *stats_rx,
struct aq_ring_stats_tx_s *stats_tx);
#endif /* AQ_VEC_H */
......@@ -21,7 +21,7 @@
.msix_irqs = 4U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_A0_RSS_MAX, \
.tcs = HW_ATL_A0_TC_MAX, \
.tcs_max = HW_ATL_A0_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL_A0_RXD_SIZE, \
.rxds_max = HW_ATL_A0_MAX_RXD, \
......@@ -136,10 +136,10 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
......
......@@ -23,7 +23,7 @@
.msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_B0_RSS_MAX, \
.tcs = HW_ATL_B0_TC_MAX, \
.tcs_max = HW_ATL_B0_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL_B0_RXD_SIZE, \
.rxds_max = HW_ATL_B0_MAX_RXD, \
......@@ -46,7 +46,8 @@
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_GSO_UDP_L4 | \
NETIF_F_GSO_PARTIAL, \
NETIF_F_GSO_PARTIAL | \
NETIF_F_HW_TC, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
......@@ -114,12 +115,34 @@ static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
return 0;
}
static int hw_atl_b0_tc_ptp_set(struct aq_hw_s *self)
{
/* Init TC2 for PTP_TX */
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
AQ_HW_PTP_TC);
/* Init TC2 for PTP_RX */
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
AQ_HW_PTP_TC);
/* No flow control for PTP */
hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, AQ_HW_PTP_TC);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
{
unsigned int i_priority = 0U;
u32 buff_size = 0U;
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
u32 tx_buff_size = HW_ATL_B0_TXBUF_MAX;
u32 rx_buff_size = HW_ATL_B0_RXBUF_MAX;
unsigned int prio = 0U;
u32 tc = 0U;
if (cfg->is_ptp) {
tx_buff_size -= HW_ATL_B0_PTP_TXBUF_SIZE;
rx_buff_size -= HW_ATL_B0_PTP_RXBUF_SIZE;
}
/* TPS Descriptor rate init */
hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
......@@ -127,63 +150,39 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
/* TPS VM init */
hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
tc = 0;
/* TX Packet Scheduler Data TC0 */
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, tc);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, tc);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
/* Tx buf size TC0 */
buff_size = HW_ATL_B0_TXBUF_MAX - HW_ATL_B0_PTP_TXBUF_SIZE;
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
(buff_size *
(1024 / 32U) * 66U) /
100U, tc);
hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
(buff_size *
(1024 / 32U) * 50U) /
100U, tc);
/* Init TC2 for PTP_TX */
tc = 2;
tx_buff_size /= cfg->tcs;
rx_buff_size /= cfg->tcs;
for (tc = 0; tc < cfg->tcs; tc++) {
u32 threshold = 0U;
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_TXBUF_SIZE,
tc);
/* Tx buf size per TC */
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
/* QoS Rx buf size per TC */
tc = 0;
buff_size = HW_ATL_B0_RXBUF_MAX - HW_ATL_B0_PTP_RXBUF_SIZE;
threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
(buff_size *
(1024U / 32U) * 66U) /
100U, tc);
hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
(buff_size *
(1024U / 32U) * 50U) /
100U, tc);
threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
/* QoS Rx buf size per TC */
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
/* Init TC2 for PTP_RX */
tc = 2;
threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, HW_ATL_B0_PTP_RXBUF_SIZE,
tc);
/* No flow control for PTP */
hw_atl_rpb_rx_xoff_en_per_tc_set(self, 0U, tc);
threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
}
if (cfg->is_ptp)
hw_atl_b0_tc_ptp_set(self);
/* QoS 802.1p priority -> TC mapping */
for (i_priority = 8U; i_priority--;)
hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
for (prio = 0; prio < 8; ++prio)
hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
cfg->prio_tc_map[prio]);
return aq_hw_err_from_flags(self);
}
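/* Sketch of the resulting buffer split, assuming (for illustration)
 * HW_ATL_B0_TXBUF_MAX == 160 (KB), cfg->tcs == 4 and PTP disabled:
 *   per-TC Tx buffer   = 160 / 4 = 40
 *   hi threshold (66%) = 40 * (1024 / 32) * 66 / 100 = 844
 *   lo threshold (50%) = 40 * (1024 / 32) * 50 / 100 = 640
 * (thresholds are in 32-byte units).
 */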
......@@ -311,10 +310,124 @@ int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
{
static const u32 max_weight = BIT(HW_ATL_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
/* Scale factor is based on the number of bits in fractional portion */
static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
HW_ATL_TPS_DESC_RATE_Y_SHIFT;
const u32 link_speed = self->aq_link_status.mbps;
struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
unsigned long num_min_rated_tcs = 0;
u32 tc_weight[AQ_CFG_TCS_MAX];
u32 fixed_max_credit;
u8 min_rate_msk = 0;
u32 sum_weight = 0;
int tc;
/* By default max_credit is based upon MTU (in units of 64 bytes) */
fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
if (link_speed) {
min_rate_msk = nic_cfg->tc_min_rate_msk &
(BIT(nic_cfg->tcs) - 1);
num_min_rated_tcs = hweight8(min_rate_msk);
}
/* First, calculate weights where min_rate is specified */
if (num_min_rated_tcs) {
for (tc = 0; tc != nic_cfg->tcs; tc++) {
if (!nic_cfg->tc_min_rate[tc]) {
tc_weight[tc] = 0;
continue;
}
tc_weight[tc] = (-1L + link_speed +
nic_cfg->tc_min_rate[tc] *
max_weight) /
link_speed;
tc_weight[tc] = min(tc_weight[tc], max_weight);
sum_weight += tc_weight[tc];
}
}
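/* The expression above is a ceiling division,
 *   tc_weight[tc] = ceil(min_rate[tc] * max_weight / link_speed),
 * clamped to max_weight, so every min-rated TC receives at least its
 * proportional share of the arbiter weight range.
 */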
/* WSP, if min_rate is set for at least one TC.
* RR otherwise.
*/
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
/* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
* leave Descriptor TC Arbiter as RR.
*/
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
for (tc = 0; tc != nic_cfg->tcs; tc++) {
const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
u32 weight, max_credit;
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
fixed_max_credit);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
if (num_min_rated_tcs) {
weight = tc_weight[tc];
if (!weight && sum_weight < max_weight)
weight = (max_weight - sum_weight) /
(nic_cfg->tcs - num_min_rated_tcs);
else if (!weight)
weight = 0x64;
max_credit = max(8 * weight, fixed_max_credit);
} else {
weight = 0x64;
max_credit = 0xFFF;
}
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
max_credit);
hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
if (en) {
/* Nominal rate is always 10G */
const u32 rate = 10000U * scale /
nic_cfg->tc_max_rate[tc];
const u32 rate_int = rate >>
HW_ATL_TPS_DESC_RATE_Y_WIDTH;
const u32 rate_frac = rate & frac_msk;
hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
} else {
/* A value of 1 indicates the queue is not
* rate controlled.
*/
hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
}
}
for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
}
return aq_hw_err_from_flags(self);
}
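/* Worked example of the fixed-point divider programming above, using
 * the 10.14 format implied by the HW_ATL_TPS_DESC_RATE_X/Y widths
 * (10 and 14 bits) and a 2500 Mbps cap:
 *   scale     = BIT(14) = 16384
 *   rate      = 10000 * 16384 / 2500 = 65536   (4.0 in 10.14)
 *   rate_int  = 65536 >> 14 = 4
 *   rate_frac = 65536 & 0x3FFF = 0
 * i.e. the nominal 10G rate divided by 4 yields the requested 2.5G.
 */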
static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
{
struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
/* Tx TC/Queue number config */
hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);
hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
......@@ -334,20 +447,32 @@ static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
u32 rss_ctrl1 = HW_ATL_RSS_DISABLED;
if (cfg->is_rss)
rss_ctrl1 = (cfg->tc_mode == AQ_TC_MODE_8TCS) ?
HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS :
HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS;
hw_atl_reg_rx_flr_rss_control1set(self, rss_ctrl1);
}
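/* Judging by the macro names, the two enabled values differ in how many
 * RSS index bits select a ring within a TC: 2 bits (4 rings per TC) in
 * 8-TC mode vs. 3 bits (8 rings per TC) in 4-TC mode.
 */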
static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int i;
/* Rx TC/RSS number config */
hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
/* Rx flow control */
hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
/* RSS Ring selection */
hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
0xB3333333U : 0x00000000U);
hw_atl_b0_hw_init_rx_rss_ctrl1(self);
/* Multicast filters */
for (i = HW_ATL_B0_MAC_MAX; i--;) {
......@@ -1078,18 +1203,6 @@ int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_tx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
*tc_mode = hw_atl_tpb_tps_tx_tc_mode_get(self);
return aq_hw_err_from_flags(self);
}
static int hw_atl_b0_rx_tc_mode_get(struct aq_hw_s *self, u32 *tc_mode)
{
*tc_mode = hw_atl_rpb_rpf_rx_traf_class_mode_get(self);
return aq_hw_err_from_flags(self);
}
#define get_ptp_ts_val_u64(self, indx) \
((u64)(hw_atl_pcs_ptp_clock_get(self, indx) & 0xffff))
......@@ -1503,13 +1616,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
.hw_tc_rate_limit_set = hw_atl_b0_hw_init_tx_tc_rate_limit,
.hw_get_regs = hw_atl_utils_hw_get_regs,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
.hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
.hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,
.hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
.hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,
......
......@@ -58,6 +58,8 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
int hw_atl_b0_hw_start(struct aq_hw_s *self);
......
......@@ -75,7 +75,7 @@
#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
#define HW_ATL_B0_TCRSS_4_8 1
#define HW_ATL_B0_TC_MAX 1U
#define HW_ATL_B0_TC_MAX 8U
#define HW_ATL_B0_RSS_MAX 8U
#define HW_ATL_B0_LRO_RXD_MAX 16U
......@@ -151,6 +151,10 @@
#define HW_ATL_B0_MAX_RXD 8184U
#define HW_ATL_B0_MAX_TXD 8184U
#define HW_ATL_RSS_DISABLED 0x00000000U
#define HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS 0xA2222222U
#define HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS 0x80003333U
/* HW layer capabilities */
#endif /* HW_ATL_B0_INTERNAL_H */
......@@ -754,7 +754,7 @@ void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
}
void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
u32 user_priority_tc_map, u32 tc)
u32 user_priority, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
......@@ -773,10 +773,9 @@ void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
};
aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
rpf_rpb_rx_tc_upt_msk[tc],
rpf_rpb_rx_tc_upt_shft[tc],
user_priority_tc_map);
aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[user_priority],
rpf_rpb_rx_tc_upt_msk[user_priority],
rpf_rpb_rx_tc_upt_shft[user_priority], tc);
}
void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
......@@ -1464,8 +1463,8 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc)
const u32 tc,
const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
......@@ -1474,13 +1473,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_weight,
u32 tc)
const u32 tc,
const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
tx_pkt_shed_desc_tc_weight);
weight);
}
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
......@@ -1493,8 +1492,8 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc)
const u32 tc,
const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
......@@ -1503,13 +1502,49 @@ void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc)
const u32 tc,
const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
weight);
}
void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
const u32 rate_mode)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_TX_DESC_RATE_MODE_ADR,
HW_ATL_TPS_TX_DESC_RATE_MODE_MSK,
HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT,
rate_mode);
}
void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 enable)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_EN_ADR(desc),
HW_ATL_TPS_DESC_RATE_EN_MSK,
HW_ATL_TPS_DESC_RATE_EN_SHIFT,
enable);
}
void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 rate_int)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_X_ADR(desc),
HW_ATL_TPS_DESC_RATE_X_MSK,
HW_ATL_TPS_DESC_RATE_X_SHIFT,
rate_int);
}
void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 rate_frac)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_Y_ADR(desc),
HW_ATL_TPS_DESC_RATE_Y_MSK,
HW_ATL_TPS_DESC_RATE_Y_SHIFT,
rate_frac);
}
/* tx */
......
......@@ -688,13 +688,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
/* set tx packet scheduler descriptor tc max credit */
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
const u32 tc,
const u32 max_credit);
/* set tx packet scheduler descriptor tc weight */
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_weight,
u32 tc);
const u32 tc,
const u32 weight);
/* set tx packet scheduler descriptor vm arbitration mode */
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
......@@ -702,13 +702,29 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
/* set tx packet scheduler tc data max credit */
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
const u32 tc,
const u32 max_credit);
/* set tx packet scheduler tc data weight */
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc);
const u32 tc,
const u32 weight);
/* set tx descriptor rate mode */
void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
const u32 rate_mode);
/* set tx packet scheduler descriptor rate enable */
void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 enable);
/* set tx packet scheduler descriptor rate integral value */
void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 rate_int);
/* set tx packet scheduler descriptor rate fractional value */
void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 rate_frac);
/* tx */
......
......@@ -2038,6 +2038,42 @@
/* default value of bitfield lso_tcp_flag_mid[b:0] */
#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
/* tx tx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "tx_tc_mode".
* port="pif_tpb_tx_tc_mode_i,pif_tps_tx_tc_mode_i"
*/
/* register address for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
/* bitmask for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
/* inverted bitmask for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
/* lower bit position of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
/* width of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
/* default value of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
/* tx tx_desc_rate_mode bitfield definitions
* preprocessor definitions for the bitfield "tx_desc_rate_mode".
* port="pif_tps_desc_rate_mode_i"
*/
/* register address for bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_ADR 0x00007900
/* bitmask for bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSK 0x00000080
/* inverted bitmask for bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSKN 0xFFFFFF7F
/* lower bit position of bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT 7
/* width of bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_WIDTH 1
/* default value of bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_DEFAULT 0x0
/* tx tx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buf_en".
* port="pif_tpb_tx_buf_en_i"
......@@ -2056,19 +2092,6 @@
/* default value of bitfield tx_buf_en */
#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
/* register address for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
/* bitmask for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
/* inverted bitmask for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
/* lower bit position of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
/* width of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
/* default value of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
......@@ -2270,6 +2293,58 @@
/* default value of bitfield data_tc_arb_mode */
#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
/* tx desc{r}_rate_en bitfield definitions
* preprocessor definitions for the bitfield "desc{r}_rate_en".
* port="pif_tps_desc_rate_en_i[0]"
*/
/* register address for bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_ADR(desc) (0x00007408 + (desc) * 0x10)
/* bitmask for bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_MSK 0x80000000
/* inverted bitmask for bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_MSKN 0x7FFFFFFF
/* lower bit position of bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_SHIFT 31
/* width of bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_WIDTH 1
/* default value of bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_DEFAULT 0x0
/* tx desc{r}_rate_x bitfield definitions
* preprocessor definitions for the bitfield "desc{r}_rate_x".
* port="pif_tps_desc0_rate_x"
*/
/* register address for bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_ADR(desc) (0x00007408 + (desc) * 0x10)
/* bitmask for bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_MSK 0x03FF0000
/* inverted bitmask for bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_MSKN 0xFC00FFFF
/* lower bit position of bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_SHIFT 16
/* width of bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_WIDTH 10
/* default value of bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_DEFAULT 0x0
/* tx desc{r}_rate_y bitfield definitions
* preprocessor definitions for the bitfield "desc{r}_rate_y".
* port="pif_tps_desc0_rate_y"
*/
/* register address for bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_ADR(desc) (0x00007408 + (desc) * 0x10)
/* bitmask for bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_MSK 0x00003FFF
/* inverted bitmask for bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_MSKN 0xFFFFC000
/* lower bit position of bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_SHIFT 0
/* width of bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_WIDTH 14
/* default value of bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_DEFAULT 0x0
/* tx desc_rate_ta_rst bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_ta_rst".
* port="pif_tps_desc_rate_ta_rst_i"
......
......@@ -10,6 +10,7 @@
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_utils.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_llh_internal.h"
#include "hw_atl2_utils.h"
#include "hw_atl2_llh.h"
#include "hw_atl2_internal.h"
......@@ -23,7 +24,7 @@ static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
.msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL2_RSS_MAX, \
.tcs = HW_ATL2_TC_MAX, \
.tcs_max = HW_ATL2_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL2_RXD_SIZE, \
.rxds_max = HW_ATL2_MAX_RXD, \
......@@ -47,7 +48,8 @@ static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_GSO_UDP_L4 | \
NETIF_F_GSO_PARTIAL, \
NETIF_F_GSO_PARTIAL | \
NETIF_F_HW_TC, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL2_MTU_JUMBO, \
......@@ -91,16 +93,49 @@ static int hw_atl2_hw_reset(struct aq_hw_s *self)
static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self)
{
if (!hw_atl_rpb_rpf_rx_traf_class_mode_get(self)) {
aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x11110000);
aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x33332222);
aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x55554444);
aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x77776666);
} else {
aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(0), 0x00000000);
aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(8), 0x11111111);
aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(16), 0x22222222);
aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(24), 0x33333333);
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int tcs, q_per_tc;
unsigned int tc, q;
u32 rx_map = 0;
u32 tx_map = 0;
hw_atl2_tpb_tx_tc_q_rand_map_en_set(self, 1U);
switch (cfg->tc_mode) {
case AQ_TC_MODE_8TCS:
tcs = 8;
q_per_tc = 4;
break;
case AQ_TC_MODE_4TCS:
tcs = 4;
q_per_tc = 8;
break;
default:
return -EINVAL;
}
for (tc = 0; tc != tcs; tc++) {
unsigned int tc_q_offset = tc * q_per_tc;
for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) {
rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q);
if (HW_ATL2_RX_Q_TC_MAP_ADR(q) !=
HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) {
aq_hw_write_reg(self,
HW_ATL2_RX_Q_TC_MAP_ADR(q),
rx_map);
rx_map = 0;
}
tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q);
if (HW_ATL2_TX_Q_TC_MAP_ADR(q) !=
HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) {
aq_hw_write_reg(self,
HW_ATL2_TX_Q_TC_MAP_ADR(q),
tx_map);
tx_map = 0;
}
}
}
return aq_hw_err_from_flags(self);
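For reference, assuming 8-TC mode (tcs = 8, q_per_tc = 4), the loop above maps queues 0-3 to TC0, queues 4-7 to TC1, and so on, so the first RX map register is written with 0x11110000 — the same constant the removed hard-coded path wrote. In 4-TC mode (q_per_tc = 8) it likewise reproduces the 0x00000000/0x11111111/... pattern, with one nibble per queue and eight queues per 32-bit register.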
......@@ -112,7 +147,6 @@ static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
u32 tx_buff_size = HW_ATL2_TXBUF_MAX;
u32 rx_buff_size = HW_ATL2_RXBUF_MAX;
unsigned int prio = 0U;
u32 threshold = 0U;
u32 tc = 0U;
/* TPS Descriptor rate init */
......@@ -122,42 +156,36 @@ static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
/* TPS VM init */
hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
/* TPS TC credits init */
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
tx_buff_size /= cfg->tcs;
rx_buff_size /= cfg->tcs;
for (tc = 0; tc < cfg->tcs; tc++) {
u32 threshold = 0U;
tc = 0;
/* Tx buf size TC0 */
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
/* TX Packet Scheduler Data TC0 */
hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF0, tc);
hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, 0x640, tc);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, tc);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, tc);
threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
/* Tx buf size TC0 */
hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc);
threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U;
hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc);
/* QoS Rx buf size per TC */
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U;
hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc);
threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
/* QoS Rx buf size per TC */
hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc);
threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U;
hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc);
threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
}
/* QoS 802.1p priority -> TC mapping */
for (prio = 0; prio < 8; ++prio)
hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio,
cfg->tcs * prio / 8);
cfg->prio_tc_map[prio]);
/* ATL2 Apply legacy ring to TC mapping */
/* ATL2 Apply ring to TC mapping */
hw_atl2_hw_queue_to_tc_map_set(self);
return aq_hw_err_from_flags(self);
......@@ -166,19 +194,149 @@ static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
static int hw_atl2_hw_rss_set(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params)
{
u8 *indirection_table = rss_params->indirection_table;

const u32 num_tcs = aq_hw_num_tcs(self);
u32 rpf_redir2_enable;
int tc;
int i;
for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;)
hw_atl2_new_rpf_rss_redir_set(self, 0, i, indirection_table[i]);
rpf_redir2_enable = num_tcs > 4 ? 1 : 0;
hw_atl2_rpf_redirection_table2_select_set(self, rpf_redir2_enable);
for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) {
for (tc = 0; tc != num_tcs; tc++) {
hw_atl2_new_rpf_rss_redir_set(self, tc, i,
tc *
aq_hw_q_per_tc(self) +
indirection_table[i]);
}
}
return aq_hw_err_from_flags(self);
}
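A quick worked example, assuming 4 TCs with 8 queues per TC (aq_hw_q_per_tc() returning 8): redirection entry i = 5 holding queue 3 makes the TC2 table point at ring 2 * 8 + 3 = 19, so each TC's RSS spraying stays within its own ring group. The second redirection table is only enabled (rpf_redir2_enable) when more than four TCs are active.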
static int hw_atl2_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
{
static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1;
/* Scale factor is based on the number of bits in the fractional portion */
static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH);
static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >>
HW_ATL_TPS_DESC_RATE_Y_SHIFT;
const u32 link_speed = self->aq_link_status.mbps;
struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
unsigned long num_min_rated_tcs = 0;
u32 tc_weight[AQ_CFG_TCS_MAX];
u32 fixed_max_credit_4b;
u32 fixed_max_credit;
u8 min_rate_msk = 0;
u32 sum_weight = 0;
int tc;
/* By default max_credit is based upon the MTU (in units of 64 bytes) */
fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64;
/* in units of 4 bytes */
fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4;
if (link_speed) {
min_rate_msk = nic_cfg->tc_min_rate_msk &
(BIT(nic_cfg->tcs) - 1);
num_min_rated_tcs = hweight8(min_rate_msk);
}
/* First, calculate weights where min_rate is specified */
if (num_min_rated_tcs) {
for (tc = 0; tc != nic_cfg->tcs; tc++) {
if (!nic_cfg->tc_min_rate[tc]) {
tc_weight[tc] = 0;
continue;
}
tc_weight[tc] = (-1L + link_speed +
nic_cfg->tc_min_rate[tc] *
max_weight) /
link_speed;
tc_weight[tc] = min(tc_weight[tc], max_weight);
sum_weight += tc_weight[tc];
}
}
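/* Worked example (illustrative values): with link_speed = 10000 Mbps,
 * tc_min_rate[tc] = 2500 Mbps and max_weight = BIT(15) - 1 = 32767, the
 * ceiling division above gives tc_weight[tc] = (9999 + 2500 * 32767) /
 * 10000 = 8192, i.e. ~25% of the weight range for a 25% min-rate share.
 */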
/* Use Weighted Strict Priority (WSP) arbitration if min_rate is set
* for at least one TC, Round-Robin (RR) otherwise.
*/
hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
/* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
* leave Descriptor TC Arbiter as RR.
*/
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U);
for (tc = 0; tc != nic_cfg->tcs; tc++) {
const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U;
const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
u32 weight, max_credit;
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc,
fixed_max_credit);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E);
if (num_min_rated_tcs) {
weight = tc_weight[tc];
if (!weight && sum_weight < max_weight)
weight = (max_weight - sum_weight) /
(nic_cfg->tcs - num_min_rated_tcs);
else if (!weight)
weight = 0x640;
max_credit = max(2 * weight, fixed_max_credit_4b);
} else {
weight = 0x640;
max_credit = 0xFFF0;
}
hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight);
hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc,
max_credit);
hw_atl_tps_tx_desc_rate_en_set(self, desc, en);
if (en) {
/* Nominal rate is always 10G (10000 Mbit/s) */
const u32 rate = 10000U * scale /
nic_cfg->tc_max_rate[tc];
const u32 rate_int = rate >>
HW_ATL_TPS_DESC_RATE_Y_WIDTH;
const u32 rate_frac = rate & frac_msk;
hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
} else {
/* A value of 1 indicates the queue is not
* rate controlled.
*/
hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
}
}
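/* The rate_x/rate_y pair programmed above is a 10.14 fixed-point
 * divider of the 10G nominal rate. Illustrative values: a 2500 Mbit/s
 * cap gives rate = 10000 * 16384 / 2500 = 65536, so rate_int = 4 and
 * rate_frac = 0 (divide by 4.0); a 3000 Mbit/s cap gives rate = 54613,
 * rate_int = 3 and rate_frac = 5461 (divide by ~3.333).
 */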
for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) {
const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0);
hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U);
hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U);
hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
{
struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg;
/* Tx TC/RSS number config */
hw_atl_tpb_tps_tx_tc_mode_set(self, 1U);
hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode);
hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
......@@ -201,13 +359,29 @@ static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self)
static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
{
struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map;
u16 action;
u8 index;
int i;
/* Action Resolver Table (ART) is used by RPF to decide which action
* to take with a packet based upon input tag and tag mask, where:
* - input tag is a combination of 3-bit VLan Prio (PCP) and
* 29-bit concatenation of all tags from filter block;
* - tag mask is a mask used for matching against input tag.
* The input_tag is compared with all the Requested_tags in the
* Record table to find a match. The Action field of the matched
* REC entry is used for further processing. If multiple entries match,
* the Action field of the lowest-numbered REC entry is used.
*/
hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF);
hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC,
HW_ATL2_MAC_UC);
hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC);
/* FW reserves the beginning of ART, thus all driver entries must
* start from the offset specified in FW caps.
*/
index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX;
hw_atl2_act_rslvr_table_set(self, index, 0,
HW_ATL2_RPF_TAG_UC_MASK |
......@@ -220,33 +394,17 @@ static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self)
HW_ATL2_RPF_TAG_UNTAG_MASK,
HW_ATL2_ACTION_DROP);
index = priv->art_base_index + HW_ATL2_RPF_VLAN_INDEX;
hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_VLAN,
HW_ATL2_RPF_TAG_VLAN_MASK,
HW_ATL2_ACTION_ASSIGN_TC(0));
index = priv->art_base_index + HW_ATL2_RPF_MAC_INDEX;
hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_UC,
HW_ATL2_RPF_TAG_UC_MASK,
HW_ATL2_ACTION_ASSIGN_TC(0));
index = priv->art_base_index + HW_ATL2_RPF_ALLMC_INDEX;
hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_BASE_ALLMC,
HW_ATL2_RPF_TAG_ALLMC_MASK,
HW_ATL2_ACTION_ASSIGN_TC(0));
/* Configure ART to map given VLan Prio (PCP) to the TC index for
* RSS redirection table.
*/
for (i = 0; i < 8; i++) {
action = HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[i]);
index = priv->art_base_index + HW_ATL2_RPF_UNTAG_INDEX;
hw_atl2_act_rslvr_table_set(self, index, HW_ATL2_RPF_TAG_UNTAG_MASK,
HW_ATL2_RPF_TAG_UNTAG_MASK,
HW_ATL2_ACTION_ASSIGN_TC(0));
index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX;
hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_VLAN_MASK,
HW_ATL2_ACTION_DISABLE);
index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_ON_INDEX;
hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_UC_MASK,
HW_ATL2_ACTION_DISABLE);
index = priv->art_base_index + HW_ATL2_RPF_PCP_TO_TC_INDEX + i;
hw_atl2_act_rslvr_table_set(self, index,
i << HW_ATL2_RPF_TAG_PCP_OFFSET,
HW_ATL2_RPF_TAG_PCP_MASK, action);
}
}
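As an illustration, assuming the default 4-TC spread prio_tc_map = {0, 0, 1, 1, 2, 2, 3, 3}, the loop installs eight records; the one for PCP 5 carries requested tag 5 << HW_ATL2_RPF_TAG_PCP_OFFSET, mask HW_ATL2_RPF_TAG_PCP_MASK and action HW_ATL2_ACTION_ASSIGN_TC(2), steering VLAN-priority-5 traffic into TC2's RSS group.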
static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self,
......@@ -309,7 +467,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
int i;
/* Rx TC/RSS number config */
hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);
hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode);
/* Rx flow control */
hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);
......@@ -317,9 +475,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL);
/* RSS Ring selection */
hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
HW_ATL_RSS_ENABLED_3INDEX_BITS :
HW_ATL_RSS_DISABLED);
hw_atl_b0_hw_init_rx_rss_ctrl1(self);
/* Multicast filters */
for (i = HW_ATL2_MAC_MAX; i--;) {
......@@ -678,6 +834,7 @@ const struct aq_hw_ops hw_atl2_ops = {
.hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl2_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
.hw_tc_rate_limit_set = hw_atl2_hw_init_tx_tc_rate_limit,
.hw_get_hw_stats = hw_atl2_utils_get_hw_stats,
.hw_get_fw_version = hw_atl2_utils_get_fw_version,
.hw_set_offload = hw_atl_b0_hw_offload_set,
......
......@@ -31,7 +31,7 @@
#define HW_ATL2_RSS_REDIRECTION_MAX 64U
#define HW_ATL2_TC_MAX 1U
#define HW_ATL2_TC_MAX 8U
#define HW_ATL2_RSS_MAX 8U
#define HW_ATL2_INTR_MODER_MAX 0x1FF
......@@ -82,13 +82,6 @@ enum HW_ATL2_RPF_ART_INDEX {
HW_ATL2_RPF_VLAN_USER_INDEX = HW_ATL2_RPF_ET_PCP_USER_INDEX + 16,
HW_ATL2_RPF_PCP_TO_TC_INDEX = HW_ATL2_RPF_VLAN_USER_INDEX +
HW_ATL_VLAN_MAX_FILTERS,
HW_ATL2_RPF_VLAN_INDEX = HW_ATL2_RPF_PCP_TO_TC_INDEX +
AQ_CFG_TCS_MAX,
HW_ATL2_RPF_MAC_INDEX,
HW_ATL2_RPF_ALLMC_INDEX,
HW_ATL2_RPF_UNTAG_INDEX,
HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX,
HW_ATL2_RPF_L2_PROMISC_ON_INDEX,
};
#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \
......@@ -124,9 +117,6 @@ enum HW_ATL2_RPF_RSS_HASH_TYPE {
HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP,
};
#define HW_ATL_RSS_DISABLED 0x00000000U
#define HW_ATL_RSS_ENABLED_3INDEX_BITS 0xB3333333U
#define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU
struct hw_atl2_priv {
......
......@@ -7,6 +7,14 @@
#include "hw_atl2_llh_internal.h"
#include "aq_hw_utils.h"
void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
u32 select)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR,
HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK,
HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT, select);
}
void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR,
......@@ -60,6 +68,15 @@ void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
/* TX */
void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
const u32 tc_q_rand_map_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR,
HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK,
HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT,
tc_q_rand_map_en);
}
void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR,
......@@ -76,9 +93,18 @@ void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
tx_intr_moderation_ctl);
}
void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
const u32 data_arb_mode)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR,
HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK,
HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT,
data_arb_mode);
}
void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc)
const u32 tc,
const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK,
......@@ -87,13 +113,13 @@ void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc)
const u32 tc,
const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc),
HW_ATL2_TPS_DATA_TCTWEIGHT_MSK,
HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
weight);
}
u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw)
......
......@@ -15,6 +15,10 @@ void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 tx_intr_moderation_ctl,
u32 queue);
/* Set Redirection Table 2 Select */
void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
u32 select);
/** Set RSS HASH type */
void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type);
......@@ -34,18 +38,25 @@ void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
/* Set VLAN filter tag */
void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
/* set tx random TC-queue mapping enable bit */
void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
const u32 tc_q_rand_map_en);
/* set tx buffer clock gate enable */
void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en);
void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
const u32 data_arb_mode);
/* set tx packet scheduler tc data max credit */
void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
const u32 tc,
const u32 max_credit);
/* set tx packet scheduler tc data weight */
void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc);
const u32 tc,
const u32 weight);
u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw);
......
......@@ -6,6 +6,16 @@
#ifndef HW_ATL2_LLH_INTERNAL_H
#define HW_ATL2_LLH_INTERNAL_H
/* RX pif_rpf_redir_2_en_i Bitfield Definitions
* PORT="pif_rpf_redir_2_en_i"
*/
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR 0x000054C8
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK 0x00001000
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSKN 0xFFFFEFFF
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT 12
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_WIDTH 1
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_DEFAULT 0x0
/* RX pif_rpf_rss_hash_type_i Bitfield Definitions
*/
#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8
......@@ -122,6 +132,24 @@
/* Default value of bitfield rx_q{Q}_tc_map[2:0] */
#define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0
/* tx tx_tc_q_rand_map_en bitfield definitions
* preprocessor definitions for the bitfield "tx_tc_q_rand_map_en".
* port="pif_tpb_tx_tc_q_rand_map_en_i"
*/
/* register address for bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR 0x00007900
/* bitmask for bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK 0x00000200
/* inverted bitmask for bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSKN 0xFFFFFDFF
/* lower bit position of bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT 9
/* width of bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_WIDTH 1
/* default value of bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_DEFAULT 0x0
/* tx tx_buffer_clk_gate_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buffer_clk_gate_en".
* port="pif_tpb_tx_buffer_clk_gate_en_i"
......@@ -140,42 +168,77 @@
/* default value of bitfield tx_buffer_clk_gate_en */
#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0
/* tx data_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
/* tx tx_q_tc_map{q} bitfield definitions
* preprocessor definitions for the bitfield "tx_q_tc_map{q}".
* parameter: queue {q} | bit-level stride | range [0, 31]
* port="pif_tpb_tx_q_tc_map0_i[2:0]"
*/
/* register address for bitfield tx_q_tc_map{q} */
#define HW_ATL2_TX_Q_TC_MAP_ADR(queue) \
(((queue) < 32) ? 0x0000799C + ((queue) / 4) * 4 : 0)
/* lower bit position of bitfield tx_q_tc_map{q} */
#define HW_ATL2_TX_Q_TC_MAP_SHIFT(queue) \
(((queue) < 32) ? ((queue) * 8) % 32 : 0)
/* width of bitfield tx_q_tc_map{q} */
#define HW_ATL2_TX_Q_TC_MAP_WIDTH 3
/* default value of bitfield tx_q_tc_map{q} */
#define HW_ATL2_TX_Q_TC_MAP_DEFAULT 0x0
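Worked example of the macro arithmetic: each map register holds four queues, one byte per queue (only the low 3 bits of each byte are used, per the WIDTH above). For queue 5, HW_ATL2_TX_Q_TC_MAP_ADR(5) = 0x799C + (5 / 4) * 4 = 0x79A0 and HW_ATL2_TX_Q_TC_MAP_SHIFT(5) = (5 * 8) % 32 = 8.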
/* tx data_tc_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "data_tc_arb_mode".
* port="pif_tps_data_tc_arb_mode_i"
*/
/* register address for bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
/* bitmask for bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK 0x00000003
/* inverted bitmask for bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffc
/* lower bit position of bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT 0
/* width of bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_WIDTH 2
/* default value of bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
/* tx data_tc{t}_credit_max[f:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_credit_max[f:0]".
* parameter: tc {t} | stride size 0x4 | range [0, 7]
* port="pif_tps_data_tc0_credit_max_i[11:0]"
* port="pif_tps_data_tc0_credit_max_i[15:0]"
*/
/* register address for bitfield data_tc{t}_credit_max[b:0] */
/* register address for bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
/* bitmask for bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0xffff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0x0000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield data_tc{t}_credit_max[b:0] */
/* width of bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 16
/* default value of bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
/* tx data_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
/* tx data_tc{t}_weight[e:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_weight[e:0]".
* parameter: tc {t} | stride size 0x4 | range [0, 7]
* port="pif_tps_data_tc0_weight_i[8:0]"
* port="pif_tps_data_tc0_weight_i[14:0]"
*/
/* register address for bitfield data_tc{t}_weight[8:0] */
/* register address for bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield data_tc{t}_weight[8:0] */
/* bitmask for bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x00007fff
/* inverted bitmask for bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xffff8000
/* lower bit position of bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0
/* width of bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 9
/* default value of bitfield data_tc{t}_weight[8:0] */
/* width of bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 15
/* default value of bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
/* tx interrupt moderation control register definitions
......