Commit b79f91f1 authored by David S. Miller

Merge branch 'net-atlantic-QoS-implementation'

Igor Russkikh says:

====================
net: atlantic: QoS implementation

This patch series adds support for mqprio rate limiters and multi-TC:
 * max_rate is supported on both A1 and A2;
 * min_rate is supported on A2 only;

This is a joint work of Mark and Dmitry.

To implement this feature, a number of rearrangements and code
improvements were made in the areas of TC/ring management, allocation
control, etc.

One of the problems we faced is the conflicting PTP functionality, which
consumes a whole traffic class due to hardware limitations.
The patches below describe in more detail how PTP and multi-TC
co-exist right now.

v2:
 * accommodated review comments (-Wmissing-prototypes and
   -Wunused-but-set-variable findings);
 * added user notification in case of conflicting multi-TC<->PTP
   configuration;
 * added automatic PTP disabling, if a conflicting configuration is
   detected;
 * removed module param, which was used for PTP disabling in v1;

v1: https://patchwork.ozlabs.org/cover/1294380/
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 59b8d277 40f05e5b
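As a rough illustration of the interface described in the cover letter (not part of the series itself), this is the kind of tc_mqprio_qopt_offload the stack hands to the driver's .ndo_setup_tc for TC_SETUP_QDISC_MQPRIO and which aq_ndo_setup_tc() consumes; the TC count, priority map and rates below are made-up values, and the rates arrive in bytes per second:

#include <net/pkt_cls.h>

/* Illustrative mqprio offload request, as consumed by aq_ndo_setup_tc(). */
static const struct tc_mqprio_qopt_offload example_mqprio = {
	.flags = TC_MQPRIO_F_MAX_RATE | TC_MQPRIO_F_MIN_RATE,
	.qopt = {
		.num_tc = 4,			/* must be a power of 2 */
		.prio_tc_map = { 0, 0, 1, 1, 2, 2, 3, 3 },
	},
	/* bytes per second: 125000000 B/s == 1000 Mbps */
	.max_rate = { 125000000, 62500000, 25000000, 12500000 },
	.min_rate = { 12500000, 12500000, 0, 0 },	/* min_rate: A2 (Antigua) only */
};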
......@@ -88,13 +88,13 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InDroppedDma",
};
static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
"Queue[%d] InPackets",
"Queue[%d] OutPackets",
"Queue[%d] Restarts",
"Queue[%d] InJumboPackets",
"Queue[%d] InLroPackets",
"Queue[%d] InErrors",
static const char * const aq_ethtool_queue_stat_names[] = {
"%sQueue[%d] InPackets",
"%sQueue[%d] OutPackets",
"%sQueue[%d] Restarts",
"%sQueue[%d] InJumboPackets",
"%sQueue[%d] InLroPackets",
"%sQueue[%d] InErrors",
};
#if IS_ENABLED(CONFIG_MACSEC)
......@@ -166,7 +166,8 @@ static u32 aq_ethtool_n_stats(struct net_device *ndev)
struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs;
ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs *
cfg->tcs;
#if IS_ENABLED(CONFIG_MACSEC)
if (nic->macsec_cfg) {
......@@ -223,7 +224,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
u8 *p = data;
int i, si;
......@@ -231,24 +232,35 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
int sa;
#endif
cfg = aq_nic_get_cfg(aq_nic);
cfg = aq_nic_get_cfg(nic);
switch (stringset) {
case ETH_SS_STATS:
case ETH_SS_STATS: {
const int stat_cnt = ARRAY_SIZE(aq_ethtool_queue_stat_names);
char tc_string[8];
int tc;
memset(tc_string, 0, sizeof(tc_string));
memcpy(p, aq_ethtool_stat_names,
sizeof(aq_ethtool_stat_names));
p = p + sizeof(aq_ethtool_stat_names);
for (tc = 0; tc < cfg->tcs; tc++) {
if (cfg->is_qos)
snprintf(tc_string, 8, "TC%d ", tc);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0;
si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
si++) {
for (si = 0; si < stat_cnt; si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_ethtool_queue_stat_names[si], i);
aq_ethtool_queue_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
p += ETH_GSTRING_LEN;
}
}
}
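/* Illustrative result of the loops above (not part of the patch): with QoS
 * enabled in 4-TC mode, tc == 1 and i == 2 give
 * AQ_NIC_CFG_TCVEC2RING(cfg, 1, 2) == 10, so the strings become
 * "TC1 Queue[10] InPackets", "TC1 Queue[10] OutPackets", and so on;
 * without QoS, tc_string stays empty and the legacy "Queue[%d] ..." names
 * are kept.
 */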
#if IS_ENABLED(CONFIG_MACSEC)
if (!aq_nic->macsec_cfg)
if (!nic->macsec_cfg)
break;
memcpy(p, aq_macsec_stat_names, sizeof(aq_macsec_stat_names));
......@@ -256,7 +268,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_txsc *aq_txsc;
if (!(test_bit(i, &aq_nic->macsec_cfg->txsc_idx_busy)))
if (!(test_bit(i, &nic->macsec_cfg->txsc_idx_busy)))
continue;
for (si = 0;
......@@ -266,7 +278,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
aq_macsec_txsc_stat_names[si], i);
p += ETH_GSTRING_LEN;
}
aq_txsc = &aq_nic->macsec_cfg->aq_txsc[i];
aq_txsc = &nic->macsec_cfg->aq_txsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_txsc->tx_sa_idx_busy)))
continue;
......@@ -283,10 +295,10 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
struct aq_macsec_rxsc *aq_rxsc;
if (!(test_bit(i, &aq_nic->macsec_cfg->rxsc_idx_busy)))
if (!(test_bit(i, &nic->macsec_cfg->rxsc_idx_busy)))
continue;
aq_rxsc = &aq_nic->macsec_cfg->aq_rxsc[i];
aq_rxsc = &nic->macsec_cfg->aq_rxsc[i];
for (sa = 0; sa < MACSEC_NUM_AN; sa++) {
if (!(test_bit(sa, &aq_rxsc->rx_sa_idx_busy)))
continue;
......@@ -302,6 +314,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
}
#endif
break;
}
case ETH_SS_PRIV_FLAGS:
memcpy(p, aq_ethtool_priv_flag_names,
sizeof(aq_ethtool_priv_flag_names));
......@@ -780,8 +793,6 @@ static int aq_set_ringparam(struct net_device *ndev,
dev_close(ndev);
}
aq_nic_free_vectors(aq_nic);
cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
cfg->rxds = min(cfg->rxds, hw_caps->rxds_max);
cfg->rxds = ALIGN(cfg->rxds, AQ_HW_RXD_MULTIPLE);
......@@ -790,15 +801,10 @@ static int aq_set_ringparam(struct net_device *ndev,
cfg->txds = min(cfg->txds, hw_caps->txds_max);
cfg->txds = ALIGN(cfg->txds, AQ_HW_TXD_MULTIPLE);
for (aq_nic->aq_vecs = 0; aq_nic->aq_vecs < cfg->vecs;
aq_nic->aq_vecs++) {
aq_nic->aq_vec[aq_nic->aq_vecs] =
aq_vec_alloc(aq_nic, aq_nic->aq_vecs, cfg);
if (unlikely(!aq_nic->aq_vec[aq_nic->aq_vecs])) {
err = -ENOMEM;
err = aq_nic_realloc_vectors(aq_nic);
if (err)
goto err_exit;
}
}
if (ndev_running)
err = dev_open(ndev, NULL);
......
......@@ -153,6 +153,8 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
struct aq_hw_rx_fltrs_s *rx_fltrs,
struct ethtool_rx_flow_spec *fsp)
{
struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
fsp->location > AQ_RX_LAST_LOC_FVLANID) {
netdev_err(aq_nic->ndev,
......@@ -170,10 +172,10 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
return -EINVAL;
}
if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
if (fsp->ring_cookie > cfg->num_rss_queues * cfg->tcs) {
netdev_err(aq_nic->ndev,
"ethtool: queue number must be in range [0, %d]",
aq_nic->aq_nic_cfg.num_rss_queues - 1);
cfg->num_rss_queues * cfg->tcs - 1);
return -EINVAL;
}
return 0;
......@@ -262,6 +264,7 @@ static bool __must_check
aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
struct ethtool_rx_flow_spec *fsp)
{
struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
bool rule_is_not_correct = false;
if (!aq_nic) {
......@@ -274,11 +277,11 @@ aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
} else if (aq_check_filter(aq_nic, fsp)) {
rule_is_not_correct = true;
} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
netdev_err(aq_nic->ndev,
"ethtool: The specified action is invalid.\n"
"Maximum allowable value action is %u.\n",
aq_nic->aq_nic_cfg.num_rss_queues - 1);
cfg->num_rss_queues * cfg->tcs - 1);
rule_is_not_correct = true;
}
}
......
......@@ -18,6 +18,12 @@
#define AQ_HW_MAC_COUNTER_HZ 312500000ll
#define AQ_HW_PHY_COUNTER_HZ 160000000ll
enum aq_tc_mode {
AQ_TC_MODE_INVALID = -1,
AQ_TC_MODE_8TCS,
AQ_TC_MODE_4TCS,
};
#define AQ_RX_FIRST_LOC_FVLANID 0U
#define AQ_RX_LAST_LOC_FVLANID 15U
#define AQ_RX_FIRST_LOC_FETHERT 16U
......@@ -29,6 +35,9 @@
(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
/* Used for rate to Mbps conversion */
#define AQ_MBPS_DIVISOR 125000 /* 1000000 / 8 */
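/* Worked example (illustrative): a 500 Mbps mqprio rate arrives from the
 * stack as 62500000 bytes per second; 62500000 / AQ_MBPS_DIVISOR == 500 Mbps.
 */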
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
......@@ -46,7 +55,7 @@ struct aq_hw_caps_s {
u32 mac_regs_count;
u32 hw_alive_check_addr;
u8 msix_irqs;
u8 tcs;
u8 tcs_max;
u8 rxd_alignment;
u8 rxd_size;
u8 txd_alignment;
......@@ -118,8 +127,11 @@ struct aq_stats_s {
#define AQ_HW_TXD_MULTIPLE 8U
#define AQ_HW_RXD_MULTIPLE 8U
#define AQ_HW_QUEUES_MAX 32U
#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
#define AQ_HW_PTP_TC 2U
#define AQ_HW_LED_BLINK 0x2U
#define AQ_HW_LED_DEFAULT 0x0U
......@@ -268,6 +280,8 @@ struct aq_hw_ops {
int (*hw_rss_hash_set)(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
int (*hw_tc_rate_limit_set)(struct aq_hw_s *self);
int (*hw_get_regs)(struct aq_hw_s *self,
const struct aq_hw_caps_s *aq_hw_caps,
u32 *regs_buff);
......@@ -279,10 +293,6 @@ struct aq_hw_ops {
int (*hw_set_offload)(struct aq_hw_s *self,
struct aq_nic_cfg_s *aq_nic_cfg);
int (*hw_tx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);
int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
......
......@@ -79,3 +79,29 @@ int aq_hw_err_from_flags(struct aq_hw_s *hw)
err_exit:
return err;
}
int aq_hw_num_tcs(struct aq_hw_s *hw)
{
switch (hw->aq_nic_cfg->tc_mode) {
case AQ_TC_MODE_8TCS:
return 8;
case AQ_TC_MODE_4TCS:
return 4;
default:
break;
}
return 1;
}
int aq_hw_q_per_tc(struct aq_hw_s *hw)
{
switch (hw->aq_nic_cfg->tc_mode) {
case AQ_TC_MODE_8TCS:
return 4;
case AQ_TC_MODE_4TCS:
return 8;
default:
return 4;
}
}
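/* Resulting queue layout on a 32-queue chip (illustrative):
 *   AQ_TC_MODE_8TCS: 8 TCs x 4 queues per TC
 *   AQ_TC_MODE_4TCS: 4 TCs x 8 queues per TC
 *   otherwise:       1 TC reported, 4 queues per TC by default
 */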
......@@ -34,5 +34,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
#endif /* AQ_HW_UTILS_H */
......@@ -478,7 +478,7 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
set_bit(txsc_idx, &cfg->txsc_idx_busy);
return 0;
return ret;
}
static int aq_mdo_upd_secy(struct macsec_context *ctx)
......
......@@ -12,11 +12,13 @@
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
......@@ -38,7 +40,7 @@ struct net_device *aq_ndev_alloc(void)
struct net_device *ndev = NULL;
struct aq_nic_s *aq_nic = NULL;
ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
ndev = alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_HW_QUEUES_MAX);
if (!ndev)
return NULL;
......@@ -330,6 +332,73 @@ static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
return 0;
}
static int aq_validate_mqprio_opt(struct aq_nic_s *self,
struct tc_mqprio_qopt_offload *mqprio,
const unsigned int num_tc)
{
const bool has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
struct aq_nic_cfg_s *aq_nic_cfg = aq_nic_get_cfg(self);
const unsigned int tcs_max = min_t(u8, aq_nic_cfg->aq_hw_caps->tcs_max,
AQ_CFG_TCS_MAX);
if (num_tc > tcs_max) {
netdev_err(self->ndev, "Too many TCs requested\n");
return -EOPNOTSUPP;
}
if (num_tc != 0 && !is_power_of_2(num_tc)) {
netdev_err(self->ndev, "TC count should be power of 2\n");
return -EOPNOTSUPP;
}
if (has_min_rate && !ATL_HW_IS_CHIP_FEATURE(self->aq_hw, ANTIGUA)) {
netdev_err(self->ndev, "Min tx rate is not supported\n");
return -EOPNOTSUPP;
}
return 0;
}
static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct tc_mqprio_qopt_offload *mqprio = type_data;
struct aq_nic_s *aq_nic = netdev_priv(dev);
bool has_min_rate;
bool has_max_rate;
int err;
int i;
if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);
err = aq_validate_mqprio_opt(aq_nic, mqprio, mqprio->qopt.num_tc);
if (err)
return err;
for (i = 0; i < mqprio->qopt.num_tc; i++) {
if (has_max_rate) {
u64 max_rate = mqprio->max_rate[i];
do_div(max_rate, AQ_MBPS_DIVISOR);
aq_nic_setup_tc_max_rate(aq_nic, i, (u32)max_rate);
}
if (has_min_rate) {
u64 min_rate = mqprio->min_rate[i];
do_div(min_rate, AQ_MBPS_DIVISOR);
aq_nic_setup_tc_min_rate(aq_nic, i, (u32)min_rate);
}
}
return aq_nic_setup_tc_mqprio(aq_nic, mqprio->qopt.num_tc,
mqprio->qopt.prio_tc_map);
}
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
......@@ -341,6 +410,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_do_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
};
static int __init aq_ndev_init_module(void)
......
......@@ -59,8 +59,15 @@ struct aq_nic_cfg_s {
bool is_polling;
bool is_rss;
bool is_lro;
bool is_qos;
bool is_ptp;
enum aq_tc_mode tc_mode;
u32 priv_flags;
u8 tcs;
u8 prio_tc_map[8];
u32 tc_max_rate[AQ_CFG_TCS_MAX];
unsigned long tc_min_rate_msk;
u32 tc_min_rate[AQ_CFG_TCS_MAX];
struct aq_rss_parameters aq_rss;
u32 eee_speeds;
};
......@@ -77,8 +84,16 @@ struct aq_nic_cfg_s {
#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
#define AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) \
(((_NIC_CFG_)->tc_mode == AQ_TC_MODE_4TCS) ? 8 : 4)
#define AQ_NIC_CFG_TCVEC2RING(_NIC_CFG_, _TC_, _VEC_) \
((_TC_) * AQ_NIC_CFG_RING_PER_TC(_NIC_CFG_) + (_VEC_))
#define AQ_NIC_RING2QMAP(_NIC_, _ID_) \
((_ID_) / AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg) * \
(_NIC_)->aq_vecs + \
((_ID_) % AQ_NIC_CFG_RING_PER_TC(&(_NIC_)->aq_nic_cfg)))
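/* Worked example (illustrative), 4-TC mode (8 rings per TC) with 4 vectors:
 *   AQ_NIC_CFG_TCVEC2RING(cfg, 1, 2) == 1 * 8 + 2           == ring 10
 *   AQ_NIC_RING2QMAP(nic, 10)        == 10 / 8 * 4 + 10 % 8 == queue 6
 * i.e. hardware ring 10 backs netdev queue 6 (TC 1, vector 2).
 */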
struct aq_hw_rx_fl2 {
struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
......@@ -104,7 +119,7 @@ struct aq_nic_s {
atomic_t flags;
u32 msg_enable;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
struct net_device *ndev;
unsigned int aq_vecs;
......@@ -164,6 +179,7 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down);
void aq_nic_set_power(struct aq_nic_s *self);
void aq_nic_free_hot_resources(struct aq_nic_s *self);
void aq_nic_free_vectors(struct aq_nic_s *self);
int aq_nic_realloc_vectors(struct aq_nic_s *self);
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
......@@ -181,4 +197,9 @@ void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
u32 location);
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map);
int aq_nic_setup_tc_max_rate(struct aq_nic_s *self, const unsigned int tc,
const u32 max_rate);
int aq_nic_setup_tc_min_rate(struct aq_nic_s *self, const unsigned int tc,
const u32 min_rate);
#endif /* AQ_NIC_H */
......@@ -431,6 +431,9 @@ static int atl_resume_common(struct device *dev, bool deep)
netif_tx_start_all_queues(nic->ndev);
err_exit:
if (ret < 0)
aq_nic_deinit(nic, true);
rtnl_unlock();
return ret;
......
......@@ -945,26 +945,29 @@ void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
#define PTP_4TC_RING_IDX 16
#define PTP_HWST_RING_IDX 31
/* Index must be 8 (8 TCs) or 16 (4 TCs).
* It depends on Traffic Class mode.
*/
static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode)
{
if (tc_mode == AQ_TC_MODE_8TCS)
return PTP_8TC_RING_IDX;
return PTP_4TC_RING_IDX;
}
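/* Illustrative: with AQ_HW_PTP_TC == 2, PTP takes the first ring of TC 2 in
 * either mode:
 *   8-TC mode: 4 rings per TC -> TC 2 starts at ring 8  (PTP_8TC_RING_IDX)
 *   4-TC mode: 8 rings per TC -> TC 2 starts at ring 16 (PTP_4TC_RING_IDX)
 */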
int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
unsigned int tx_ring_idx, rx_ring_idx;
struct aq_ring_s *hwts;
u32 tx_tc_mode, rx_tc_mode;
struct aq_ring_s *ring;
int err;
if (!aq_ptp)
return 0;
/* Index must to be 8 (8 TCs) or 16 (4 TCs).
* It depends from Traffic Class mode.
*/
aq_nic->aq_hw_ops->hw_tx_tc_mode_get(aq_nic->aq_hw, &tx_tc_mode);
if (tx_tc_mode == 0)
tx_ring_idx = PTP_8TC_RING_IDX;
else
tx_ring_idx = PTP_4TC_RING_IDX;
tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
tx_ring_idx, &aq_nic->aq_nic_cfg);
......@@ -973,11 +976,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
goto err_exit;
}
aq_nic->aq_hw_ops->hw_rx_tc_mode_get(aq_nic->aq_hw, &rx_tc_mode);
if (rx_tc_mode == 0)
rx_ring_idx = PTP_8TC_RING_IDX;
else
rx_ring_idx = PTP_4TC_RING_IDX;
rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
rx_ring_idx, &aq_nic->aq_nic_cfg);
......
......@@ -232,8 +232,11 @@ void aq_ring_queue_wake(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
if (__netif_subqueue_stopped(ndev, ring->idx)) {
netif_wake_subqueue(ndev, ring->idx);
if (__netif_subqueue_stopped(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic,
ring->idx))) {
netif_wake_subqueue(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
ring->stats.tx.queue_restarts++;
}
}
......@@ -242,8 +245,11 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
{
struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
if (!__netif_subqueue_stopped(ndev, ring->idx))
netif_stop_subqueue(ndev, ring->idx);
if (!__netif_subqueue_stopped(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic,
ring->idx)))
netif_stop_subqueue(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}
bool aq_ring_tx_clean(struct aq_ring_s *self)
......@@ -466,7 +472,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
PKT_HASH_TYPE_NONE);
/* Send all PTP traffic to 0 queue */
skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);
skb_record_rx_queue(skb,
is_ptp_ring ? 0
: AQ_NIC_RING2QMAP(self->aq_nic,
self->idx));
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
......
......@@ -103,16 +103,11 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg)
{
struct aq_ring_s *ring = NULL;
struct aq_vec_s *self = NULL;
unsigned int i = 0U;
int err = 0;
self = kzalloc(sizeof(*self), GFP_KERNEL);
if (!self) {
err = -ENOMEM;
if (!self)
goto err_exit;
}
self->aq_nic = aq_nic;
self->aq_ring_param.vec_idx = idx;
......@@ -128,10 +123,20 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
err_exit:
return self;
}
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
{
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
int err = 0;
for (i = 0; i < aq_nic_cfg->tcs; ++i) {
unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic,
self->tx_rings,
self->aq_ring_param.vec_idx);
const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
i, idx);
ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
idx_ring, aq_nic_cfg);
......@@ -156,11 +161,11 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
err_exit:
if (err < 0) {
aq_vec_free(self);
aq_vec_ring_free(self);
self = NULL;
}
return self;
return err;
}
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
......@@ -269,6 +274,18 @@ err_exit:;
}
void aq_vec_free(struct aq_vec_s *self)
{
if (!self)
goto err_exit;
netif_napi_del(&self->napi);
kfree(self);
err_exit:;
}
void aq_vec_ring_free(struct aq_vec_s *self)
{
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
......@@ -279,13 +296,12 @@ void aq_vec_free(struct aq_vec_s *self)
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
aq_ring_free(&ring[AQ_VEC_TX_ID]);
if (i < self->rx_rings)
aq_ring_free(&ring[AQ_VEC_RX_ID]);
}
netif_napi_del(&self->napi);
kfree(self);
self->tx_rings = 0;
self->rx_rings = 0;
err_exit:;
}
......@@ -333,16 +349,14 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
return &self->aq_ring_param.affinity_mask;
}
void aq_vec_add_stats(struct aq_vec_s *self,
static void aq_vec_add_stats(struct aq_vec_s *self,
const unsigned int tc,
struct aq_ring_stats_rx_s *stats_rx,
struct aq_ring_stats_tx_s *stats_tx)
{
struct aq_ring_s *ring = NULL;
unsigned int r = 0U;
struct aq_ring_s *ring = self->ring[tc];
for (r = 0U, ring = self->ring[0];
self->tx_rings > r; ++r, ring = self->ring[r]) {
struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
if (tc < self->rx_rings) {
struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
stats_rx->packets += rx->packets;
......@@ -353,6 +367,10 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_rx->pg_losts += rx->pg_losts;
stats_rx->pg_flips += rx->pg_flips;
stats_rx->pg_reuses += rx->pg_reuses;
}
if (tc < self->tx_rings) {
struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
......@@ -361,7 +379,8 @@ void aq_vec_add_stats(struct aq_vec_s *self,
}
}
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
unsigned int *p_count)
{
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
......@@ -369,7 +388,8 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
aq_vec_add_stats(self, &stats_rx, &stats_tx);
aq_vec_add_stats(self, tc, &stats_rx, &stats_tx);
/* This data should mimic aq_ethtool_queue_stat_names structure
*/
......
......@@ -25,17 +25,17 @@ irqreturn_t aq_vec_isr(int irq, void *private);
irqreturn_t aq_vec_isr_legacy(int irq, void *private);
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg);
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
struct aq_hw_s *aq_hw);
void aq_vec_deinit(struct aq_vec_s *self);
void aq_vec_free(struct aq_vec_s *self);
void aq_vec_ring_free(struct aq_vec_s *self);
int aq_vec_start(struct aq_vec_s *self);
void aq_vec_stop(struct aq_vec_s *self);
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data,
int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
unsigned int *p_count);
void aq_vec_add_stats(struct aq_vec_s *self,
struct aq_ring_stats_rx_s *stats_rx,
struct aq_ring_stats_tx_s *stats_tx);
#endif /* AQ_VEC_H */
......@@ -21,7 +21,7 @@
.msix_irqs = 4U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_A0_RSS_MAX, \
.tcs = HW_ATL_A0_TC_MAX, \
.tcs_max = HW_ATL_A0_TC_MAX, \
.rxd_alignment = 1U, \
.rxd_size = HW_ATL_A0_RXD_SIZE, \
.rxds_max = HW_ATL_A0_MAX_RXD, \
......@@ -136,10 +136,10 @@ static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0U, 0xFFF);
hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0U, 0x64);
hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0U, 0x50);
hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0U, 0x1E);
/* Tx buf size */
buff_size = HW_ATL_A0_TXBUF_MAX;
......
......@@ -58,6 +58,8 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
int hw_atl_b0_hw_start(struct aq_hw_s *self);
......
......@@ -75,7 +75,7 @@
#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
#define HW_ATL_B0_TCRSS_4_8 1
#define HW_ATL_B0_TC_MAX 1U
#define HW_ATL_B0_TC_MAX 8U
#define HW_ATL_B0_RSS_MAX 8U
#define HW_ATL_B0_LRO_RXD_MAX 16U
......@@ -151,6 +151,10 @@
#define HW_ATL_B0_MAX_RXD 8184U
#define HW_ATL_B0_MAX_TXD 8184U
#define HW_ATL_RSS_DISABLED 0x00000000U
#define HW_ATL_RSS_ENABLED_8TCS_2INDEX_BITS 0xA2222222U
#define HW_ATL_RSS_ENABLED_4TCS_3INDEX_BITS 0x80003333U
/* HW layer capabilities */
#endif /* HW_ATL_B0_INTERNAL_H */
......@@ -754,7 +754,7 @@ void hw_atl_rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
}
void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
u32 user_priority_tc_map, u32 tc)
u32 user_priority, u32 tc)
{
/* register address for bitfield rx_tc_up{t}[2:0] */
static u32 rpf_rpb_rx_tc_upt_adr[8] = {
......@@ -773,10 +773,9 @@ void hw_atl_rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
};
aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
rpf_rpb_rx_tc_upt_msk[tc],
rpf_rpb_rx_tc_upt_shft[tc],
user_priority_tc_map);
aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[user_priority],
rpf_rpb_rx_tc_upt_msk[user_priority],
rpf_rpb_rx_tc_upt_shft[user_priority], tc);
}
void hw_atl_rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
......@@ -1464,8 +1463,8 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc)
const u32 tc,
const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DESC_TCTCREDIT_MAX_MSK,
......@@ -1474,13 +1473,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_weight,
u32 tc)
const u32 tc,
const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DESC_TCTWEIGHT_MSK,
HW_ATL_TPS_DESC_TCTWEIGHT_SHIFT,
tx_pkt_shed_desc_tc_weight);
weight);
}
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
......@@ -1493,8 +1492,8 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc)
const u32 tc,
const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
HW_ATL_TPS_DATA_TCTCREDIT_MAX_MSK,
......@@ -1503,13 +1502,49 @@ void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc)
const u32 tc,
const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DATA_TCTWEIGHT_ADR(tc),
HW_ATL_TPS_DATA_TCTWEIGHT_MSK,
HW_ATL_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
weight);
}
void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
const u32 rate_mode)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_TX_DESC_RATE_MODE_ADR,
HW_ATL_TPS_TX_DESC_RATE_MODE_MSK,
HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT,
rate_mode);
}
void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 enable)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_EN_ADR(desc),
HW_ATL_TPS_DESC_RATE_EN_MSK,
HW_ATL_TPS_DESC_RATE_EN_SHIFT,
enable);
}
void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 rate_int)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_X_ADR(desc),
HW_ATL_TPS_DESC_RATE_X_MSK,
HW_ATL_TPS_DESC_RATE_X_SHIFT,
rate_int);
}
void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 rate_frac)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TPS_DESC_RATE_Y_ADR(desc),
HW_ATL_TPS_DESC_RATE_Y_MSK,
HW_ATL_TPS_DESC_RATE_Y_SHIFT,
rate_frac);
}
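/* Usage sketch (illustrative, not part of this patch): a caller such as the
 * hw_tc_rate_limit_set callback could program a per-descriptor limiter along
 * these lines; the exact rate_x/rate_y encoding of the target rate is an
 * assumption here.
 *
 *	hw_atl_tps_tx_desc_rate_mode_set(self, 1U);
 *	hw_atl_tps_tx_desc_rate_en_set(self, desc, 1U);
 *	hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int);
 *	hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac);
 */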
/* tx */
......
......@@ -688,13 +688,13 @@ void hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
/* set tx packet scheduler descriptor tc max credit */
void hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
const u32 tc,
const u32 max_credit);
/* set tx packet scheduler descriptor tc weight */
void hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_desc_tc_weight,
u32 tc);
const u32 tc,
const u32 weight);
/* set tx packet scheduler descriptor vm arbitration mode */
void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
......@@ -702,13 +702,29 @@ void hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
/* set tx packet scheduler tc data max credit */
void hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
const u32 tc,
const u32 max_credit);
/* set tx packet scheduler tc data weight */
void hw_atl_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc);
const u32 tc,
const u32 weight);
/* set tx descriptor rate mode */
void hw_atl_tps_tx_desc_rate_mode_set(struct aq_hw_s *aq_hw,
const u32 rate_mode);
/* set tx packet scheduler descriptor rate enable */
void hw_atl_tps_tx_desc_rate_en_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 enable);
/* set tx packet scheduler descriptor rate integral value */
void hw_atl_tps_tx_desc_rate_x_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 rate_int);
/* set tx packet scheduler descriptor rate fractional value */
void hw_atl_tps_tx_desc_rate_y_set(struct aq_hw_s *aq_hw, const u32 desc,
const u32 rate_frac);
/* tx */
......
......@@ -2038,6 +2038,42 @@
/* default value of bitfield lso_tcp_flag_mid[b:0] */
#define HW_ATL_THM_LSO_TCP_FLAG_MID_DEFAULT 0x0
/* tx tx_tc_mode bitfield definitions
* preprocessor definitions for the bitfield "tx_tc_mode".
* port="pif_tpb_tx_tc_mode_i,pif_tps_tx_tc_mode_i"
*/
/* register address for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
/* bitmask for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
/* inverted bitmask for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
/* lower bit position of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
/* width of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
/* default value of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
/* tx tx_desc_rate_mode bitfield definitions
* preprocessor definitions for the bitfield "tx_desc_rate_mode".
* port="pif_tps_desc_rate_mode_i"
*/
/* register address for bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_ADR 0x00007900
/* bitmask for bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSK 0x00000080
/* inverted bitmask for bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_MSKN 0xFFFFFF7F
/* lower bit position of bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_SHIFT 7
/* width of bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_WIDTH 1
/* default value of bitfield tx_desc_rate_mode */
#define HW_ATL_TPS_TX_DESC_RATE_MODE_DEFAULT 0x0
/* tx tx_buf_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buf_en".
* port="pif_tpb_tx_buf_en_i"
......@@ -2056,19 +2092,6 @@
/* default value of bitfield tx_buf_en */
#define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
/* register address for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
/* bitmask for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
/* inverted bitmask for bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
/* lower bit position of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
/* width of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
/* default value of bitfield tx_tc_mode */
#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
/* tx tx{b}_hi_thresh[c:0] bitfield definitions
* preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
* parameter: buffer {b} | stride size 0x10 | range [0, 7]
......@@ -2270,6 +2293,58 @@
/* default value of bitfield data_tc_arb_mode */
#define HW_ATL_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
/* tx desc{r}_rate_en bitfield definitions
* preprocessor definitions for the bitfield "desc{r}_rate_en".
* port="pif_tps_desc_rate_en_i[0]"
*/
/* register address for bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_ADR(desc) (0x00007408 + (desc) * 0x10)
/* bitmask for bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_MSK 0x80000000
/* inverted bitmask for bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_MSKN 0x7FFFFFFF
/* lower bit position of bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_SHIFT 31
/* width of bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_WIDTH 1
/* default value of bitfield desc{r}_rate_en */
#define HW_ATL_TPS_DESC_RATE_EN_DEFAULT 0x0
/* tx desc{r}_rate_x bitfield definitions
* preprocessor definitions for the bitfield "desc{r}_rate_x".
* port="pif_tps_desc0_rate_x"
*/
/* register address for bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_ADR(desc) (0x00007408 + (desc) * 0x10)
/* bitmask for bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_MSK 0x03FF0000
/* inverted bitmask for bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_MSKN 0xFC00FFFF
/* lower bit position of bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_SHIFT 16
/* width of bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_WIDTH 10
/* default value of bitfield desc{r}_rate_x */
#define HW_ATL_TPS_DESC_RATE_X_DEFAULT 0x0
/* tx desc{r}_rate_y bitfield definitions
* preprocessor definitions for the bitfield "desc{r}_rate_y".
* port="pif_tps_desc0_rate_y"
*/
/* register address for bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_ADR(desc) (0x00007408 + (desc) * 0x10)
/* bitmask for bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_MSK 0x00003FFF
/* inverted bitmask for bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_MSKN 0xFFFFC000
/* lower bit position of bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_SHIFT 0
/* width of bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_WIDTH 14
/* default value of bitfield desc{r}_rate_y */
#define HW_ATL_TPS_DESC_RATE_Y_DEFAULT 0x0
/* tx desc_rate_ta_rst bitfield definitions
* preprocessor definitions for the bitfield "desc_rate_ta_rst".
* port="pif_tps_desc_rate_ta_rst_i"
......
......@@ -31,7 +31,7 @@
#define HW_ATL2_RSS_REDIRECTION_MAX 64U
#define HW_ATL2_TC_MAX 1U
#define HW_ATL2_TC_MAX 8U
#define HW_ATL2_RSS_MAX 8U
#define HW_ATL2_INTR_MODER_MAX 0x1FF
......@@ -82,13 +82,6 @@ enum HW_ATL2_RPF_ART_INDEX {
HW_ATL2_RPF_VLAN_USER_INDEX = HW_ATL2_RPF_ET_PCP_USER_INDEX + 16,
HW_ATL2_RPF_PCP_TO_TC_INDEX = HW_ATL2_RPF_VLAN_USER_INDEX +
HW_ATL_VLAN_MAX_FILTERS,
HW_ATL2_RPF_VLAN_INDEX = HW_ATL2_RPF_PCP_TO_TC_INDEX +
AQ_CFG_TCS_MAX,
HW_ATL2_RPF_MAC_INDEX,
HW_ATL2_RPF_ALLMC_INDEX,
HW_ATL2_RPF_UNTAG_INDEX,
HW_ATL2_RPF_VLAN_PROMISC_ON_INDEX,
HW_ATL2_RPF_L2_PROMISC_ON_INDEX,
};
#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \
......@@ -124,9 +117,6 @@ enum HW_ATL2_RPF_RSS_HASH_TYPE {
HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP,
};
#define HW_ATL_RSS_DISABLED 0x00000000U
#define HW_ATL_RSS_ENABLED_3INDEX_BITS 0xB3333333U
#define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU
struct hw_atl2_priv {
......
......@@ -7,6 +7,14 @@
#include "hw_atl2_llh_internal.h"
#include "aq_hw_utils.h"
void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
u32 select)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR,
HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK,
HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT, select);
}
void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR,
......@@ -60,6 +68,15 @@ void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter)
/* TX */
void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
const u32 tc_q_rand_map_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR,
HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK,
HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT,
tc_q_rand_map_en);
}
void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR,
......@@ -76,9 +93,18 @@ void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
tx_intr_moderation_ctl);
}
void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
const u32 data_arb_mode)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR,
HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK,
HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT,
data_arb_mode);
}
void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc)
const u32 tc,
const u32 max_credit)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc),
HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK,
......@@ -87,13 +113,13 @@ void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
}
void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc)
const u32 tc,
const u32 weight)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc),
HW_ATL2_TPS_DATA_TCTWEIGHT_MSK,
HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT,
tx_pkt_shed_tc_data_weight);
weight);
}
u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw)
......
......@@ -15,6 +15,10 @@ void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
u32 tx_intr_moderation_ctl,
u32 queue);
/* Set Redirection Table 2 Select */
void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw,
u32 select);
/** Set RSS HASH type */
void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type);
......@@ -34,18 +38,25 @@ void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index,
/* Set VLAN filter tag */
void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter);
/* set tx random TC-queue mapping enable bit */
void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw,
const u32 tc_q_rand_map_en);
/* set tx buffer clock gate enable */
void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en);
void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
const u32 data_arb_mode);
/* set tx packet scheduler tc data max credit */
void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
u32 max_credit,
u32 tc);
const u32 tc,
const u32 max_credit);
/* set tx packet scheduler tc data weight */
void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
u32 tx_pkt_shed_tc_data_weight,
u32 tc);
const u32 tc,
const u32 weight);
u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw);
......
......@@ -6,6 +6,16 @@
#ifndef HW_ATL2_LLH_INTERNAL_H
#define HW_ATL2_LLH_INTERNAL_H
/* RX pif_rpf_redir_2_en_i Bitfield Definitions
* PORT="pif_rpf_redir_2_en_i"
*/
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR 0x000054C8
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK 0x00001000
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSKN 0xFFFFEFFF
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT 12
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_WIDTH 1
#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_DEFAULT 0x0
/* RX pif_rpf_rss_hash_type_i Bitfield Definitions
*/
#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8
......@@ -122,6 +132,24 @@
/* Default value of bitfield rx_q{Q}_tc_map[2:0] */
#define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0
/* tx tx_tc_q_rand_map_en bitfield definitions
* preprocessor definitions for the bitfield "tx_tc_q_rand_map_en".
* port="pif_tpb_tx_tc_q_rand_map_en_i"
*/
/* register address for bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR 0x00007900
/* bitmask for bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK 0x00000200
/* inverted bitmask for bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSKN 0xFFFFFDFF
/* lower bit position of bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT 9
/* width of bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_WIDTH 1
/* default value of bitfield tx_tc_q_rand_map_en */
#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_DEFAULT 0x0
/* tx tx_buffer_clk_gate_en bitfield definitions
* preprocessor definitions for the bitfield "tx_buffer_clk_gate_en".
* port="pif_tpb_tx_buffer_clk_gate_en_i"
......@@ -140,42 +168,77 @@
/* default value of bitfield tx_buffer_clk_gate_en */
#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0
/* tx data_tc{t}_credit_max[b:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
/* tx tx_q_tc_map{q} bitfield definitions
* preprocessor definitions for the bitfield "tx_q_tc_map{q}".
* parameter: queue {q} | bit-level stride | range [0, 31]
* port="pif_tpb_tx_q_tc_map0_i[2:0]"
*/
/* register address for bitfield tx_q_tc_map{q} */
#define HW_ATL2_TX_Q_TC_MAP_ADR(queue) \
(((queue) < 32) ? 0x0000799C + ((queue) / 4) * 4 : 0)
/* lower bit position of bitfield tx_q_tc_map{q} */
#define HW_ATL2_TX_Q_TC_MAP_SHIFT(queue) \
(((queue) < 32) ? ((queue) * 8) % 32 : 0)
/* width of bitfield tx_q_tc_map{q} */
#define HW_ATL2_TX_Q_TC_MAP_WIDTH 3
/* default value of bitfield tx_q_tc_map{q} */
#define HW_ATL2_TX_Q_TC_MAP_DEFAULT 0x0
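/* Worked example (illustrative): queue 10 is mapped via
 *   HW_ATL2_TX_Q_TC_MAP_ADR(10)   == 0x0000799C + (10 / 4) * 4 == 0x000079A4
 *   HW_ATL2_TX_Q_TC_MAP_SHIFT(10) == (10 * 8) % 32             == 16
 * i.e. four queues share each 32-bit register, 8 bits (3 used) per queue.
 */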
/* tx data_tc_arb_mode bitfield definitions
* preprocessor definitions for the bitfield "data_tc_arb_mode".
* port="pif_tps_data_tc_arb_mode_i"
*/
/* register address for bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR 0x00007100
/* bitmask for bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK 0x00000003
/* inverted bitmask for bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffc
/* lower bit position of bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT 0
/* width of bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_WIDTH 2
/* default value of bitfield data_tc_arb_mode */
#define HW_ATL2_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0
/* tx data_tc{t}_credit_max[f:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_credit_max[f:0]".
* parameter: tc {t} | stride size 0x4 | range [0, 7]
* port="pif_tps_data_tc0_credit_max_i[11:0]"
* port="pif_tps_data_tc0_credit_max_i[15:0]"
*/
/* register address for bitfield data_tc{t}_credit_max[b:0] */
/* register address for bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0x0fff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0xf000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
/* bitmask for bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0xffff0000
/* inverted bitmask for bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0x0000ffff
/* lower bit position of bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16
/* width of bitfield data_tc{t}_credit_max[b:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 12
/* default value of bitfield data_tc{t}_credit_max[b:0] */
/* width of bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 16
/* default value of bitfield data_tc{t}_credit_max[f:0] */
#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0
/* tx data_tc{t}_weight[8:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
/* tx data_tc{t}_weight[e:0] bitfield definitions
* preprocessor definitions for the bitfield "data_tc{t}_weight[e:0]".
* parameter: tc {t} | stride size 0x4 | range [0, 7]
* port="pif_tps_data_tc0_weight_i[8:0]"
* port="pif_tps_data_tc0_weight_i[14:0]"
*/
/* register address for bitfield data_tc{t}_weight[8:0] */
/* register address for bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4)
/* bitmask for bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x000001ff
/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xfffffe00
/* lower bit position of bitfield data_tc{t}_weight[8:0] */
/* bitmask for bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x00007fff
/* inverted bitmask for bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xffff8000
/* lower bit position of bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0
/* width of bitfield data_tc{t}_weight[8:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 9
/* default value of bitfield data_tc{t}_weight[8:0] */
/* width of bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 15
/* default value of bitfield data_tc{t}_weight[e:0] */
#define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0
/* tx interrupt moderation control register definitions
......