Commit 4f1b4da5 authored by David S. Miller

Merge branch 'net-atlantic-various-features'

Mark Starovoytov says:

====================
net: atlantic: various features

This patchset adds more features for Atlantic NICs:
 * media detect;
 * additional per-queue stats;
 * PTP stats;
 * ipv6 support for TCP LSO and UDP GSO;
 * 64-bit operations;
 * A0 ntuple filters;
 * MAC temperature (hwmon).

This work is a joint effort of Marvell developers.

v3:
 * reworked patches related to stats:
   . fixed u64_stats_update_* usage;
   . use simple assignment in _get_stats / _fill_stats_data;
   . made _get_sw_stats / _fill_stats_data return the count as their return value;
   . split rx and tx per-queue stats;

v2: https://patchwork.ozlabs.org/cover/1329652/
 * removed media detect feature (will be reworked and submitted later);
 * removed irq counter from stats;
 * use u64_stats_update_* to protect 64-bit stats (see the sketch below);
 * use io-64-nonatomic-lo-hi.h for readq/writeq fallbacks;

v1: https://patchwork.ozlabs.org/cover/1327894/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5c5b7581 8dcf2ad3
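
The stats-related changes below guard every 64-bit per-queue counter with the kernel's u64_stats_sync helpers, so that readers (ethtool) get a consistent snapshot even on 32-bit hosts where a u64 load is not atomic. A minimal sketch of the writer/reader pairing the patches use, with hypothetical struct and function names (not taken from the driver):

#include <linux/u64_stats_sync.h>

/* Illustrative sketch only; names below are not from the driver. */
struct ring_stats {
	struct u64_stats_sync syncp;	/* must be first, as in aq_ring.h */
	u64 packets;
	u64 bytes;
};

/* Writer (datapath): bump counters inside the update section. */
static void ring_stats_add(struct ring_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader (ethtool): retry until a consistent snapshot is fetched. */
static void ring_stats_read(struct ring_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
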
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014-2019 aQuantia Corporation. */
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_drvinfo.c: Definition of common code for firmware info in sys.*/
......@@ -12,32 +16,51 @@
#include <linux/uaccess.h>
#include "aq_drvinfo.h"
#include "aq_nic.h"
#if IS_REACHABLE(CONFIG_HWMON)
static const char * const atl_temp_label[] = {
"PHY Temperature",
"MAC Temperature",
};
static int aq_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *value)
{
struct aq_nic_s *aq_nic = dev_get_drvdata(dev);
int err = 0;
int temp;
int err;
if (!aq_nic)
return -EIO;
if (type != hwmon_temp)
if (type != hwmon_temp || attr != hwmon_temp_input)
return -EOPNOTSUPP;
switch (channel) {
case 0:
if (!aq_nic->aq_fw_ops->get_phy_temp)
return -EOPNOTSUPP;
switch (attr) {
case hwmon_temp_input:
err = aq_nic->aq_fw_ops->get_phy_temp(aq_nic->aq_hw, &temp);
*value = temp;
return err;
break;
case 1:
if (!aq_nic->aq_fw_ops->get_mac_temp &&
!aq_nic->aq_hw_ops->hw_get_mac_temp)
return -EOPNOTSUPP;
if (aq_nic->aq_fw_ops->get_mac_temp)
err = aq_nic->aq_fw_ops->get_mac_temp(aq_nic->aq_hw, &temp);
else
err = aq_nic->aq_hw_ops->hw_get_mac_temp(aq_nic->aq_hw, &temp);
*value = temp;
break;
default:
return -EOPNOTSUPP;
}
return err;
}
static int aq_hwmon_read_string(struct device *dev,
......@@ -49,28 +72,32 @@ static int aq_hwmon_read_string(struct device *dev,
if (!aq_nic)
return -EIO;
if (type != hwmon_temp)
if (type != hwmon_temp || attr != hwmon_temp_label)
return -EOPNOTSUPP;
if (!aq_nic->aq_fw_ops->get_phy_temp)
if (channel < ARRAY_SIZE(atl_temp_label))
*str = atl_temp_label[channel];
else
return -EOPNOTSUPP;
switch (attr) {
case hwmon_temp_label:
*str = "PHY Temperature";
return 0;
default:
return -EOPNOTSUPP;
}
}
static umode_t aq_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type,
u32 attr, int channel)
{
const struct aq_nic_s *nic = data;
if (type != hwmon_temp)
return 0;
if (channel == 0 && !nic->aq_fw_ops->get_phy_temp)
return 0;
else if (channel == 1 && !nic->aq_fw_ops->get_mac_temp &&
!nic->aq_hw_ops->hw_get_mac_temp)
return 0;
switch (attr) {
case hwmon_temp_input:
case hwmon_temp_label:
......@@ -87,6 +114,7 @@ static const struct hwmon_ops aq_hwmon_ops = {
};
static u32 aq_hwmon_temp_config[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
0,
};
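/* With both channels declared HWMON_T_INPUT | HWMON_T_LABEL, the hwmon core
 * is expected to expose temp1_input/temp1_label (PHY) and
 * temp2_input/temp2_label (MAC), with inputs in millidegrees Celsius, under
 * /sys/class/hwmon/hwmonN/; a channel's attributes stay hidden when
 * aq_hwmon_is_visible() finds no firmware/hardware op to back it.
 */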
......
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2014-2017 aQuantia Corporation. */
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_drvinfo.h: Declaration of common code for firmware info in sys.*/
#ifndef AQ_DRVINFO_H
#define AQ_DRVINFO_H
#include "aq_nic.h"
#include "aq_hw.h"
#include "hw_atl/hw_atl_utils.h"
struct net_device;
int aq_drvinfo_init(struct net_device *ndev);
......
......@@ -89,13 +89,19 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InDroppedDma",
};
static const char * const aq_ethtool_queue_stat_names[] = {
static const char * const aq_ethtool_queue_rx_stat_names[] = {
"%sQueue[%d] InPackets",
"%sQueue[%d] OutPackets",
"%sQueue[%d] Restarts",
"%sQueue[%d] InJumboPackets",
"%sQueue[%d] InLroPackets",
"%sQueue[%d] InErrors",
"%sQueue[%d] AllocFails",
"%sQueue[%d] SkbAllocFails",
"%sQueue[%d] Polls",
};
static const char * const aq_ethtool_queue_tx_stat_names[] = {
"%sQueue[%d] OutPackets",
"%sQueue[%d] Restarts",
};
#if IS_ENABLED(CONFIG_MACSEC)
......@@ -164,11 +170,17 @@ static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
static u32 aq_ethtool_n_stats(struct net_device *ndev)
{
const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
struct aq_nic_s *nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs *
cfg->tcs;
(rx_stat_cnt + tx_stat_cnt) * cfg->vecs * cfg->tcs;
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
n_stats += rx_stat_cnt * aq_ptp_get_ring_cnt(nic, ATL_RING_RX) +
tx_stat_cnt * aq_ptp_get_ring_cnt(nic, ATL_RING_TX);
#endif
#if IS_ENABLED(CONFIG_MACSEC)
if (nic->macsec_cfg) {
......@@ -192,6 +204,9 @@ static void aq_ethtool_stats(struct net_device *ndev,
memset(data, 0, aq_ethtool_n_stats(ndev) * sizeof(u64));
data = aq_nic_get_stats(aq_nic, data);
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
data = aq_ptp_get_stats(aq_nic, data);
#endif
#if IS_ENABLED(CONFIG_MACSEC)
data = aq_macsec_get_stats(aq_nic, data);
#endif
......@@ -237,7 +252,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
switch (stringset) {
case ETH_SS_STATS: {
const int stat_cnt = ARRAY_SIZE(aq_ethtool_queue_stat_names);
const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
char tc_string[8];
int tc;
......@@ -251,15 +267,51 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
snprintf(tc_string, 8, "TC%d ", tc);
for (i = 0; i < cfg->vecs; i++) {
for (si = 0; si < stat_cnt; si++) {
for (si = 0; si < rx_stat_cnt; si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
p += ETH_GSTRING_LEN;
}
for (si = 0; si < tx_stat_cnt; si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_ethtool_queue_stat_names[si],
aq_ethtool_queue_tx_stat_names[si],
tc_string,
AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
p += ETH_GSTRING_LEN;
}
}
}
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
if (nic->aq_ptp) {
const int rx_ring_cnt = aq_ptp_get_ring_cnt(nic, ATL_RING_RX);
const int tx_ring_cnt = aq_ptp_get_ring_cnt(nic, ATL_RING_TX);
unsigned int ptp_ring_idx =
aq_ptp_ring_idx(nic->aq_nic_cfg.tc_mode);
snprintf(tc_string, 8, "PTP ");
for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) {
for (si = 0; si < rx_stat_cnt; si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_ethtool_queue_rx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
p += ETH_GSTRING_LEN;
}
if (i >= tx_ring_cnt)
continue;
for (si = 0; si < tx_stat_cnt; si++) {
snprintf(p, ETH_GSTRING_LEN,
aq_ethtool_queue_tx_stat_names[si],
tc_string,
i ? PTP_HWST_RING_IDX : ptp_ring_idx);
p += ETH_GSTRING_LEN;
}
}
}
#endif
#if IS_ENABLED(CONFIG_MACSEC)
if (!nic->macsec_cfg)
break;
......
......@@ -36,6 +36,8 @@ enum aq_tc_mode {
(AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
#define AQ_FRAC_PER_NS 0x100000000LL
/* Used for rate to Mbps conversion */
#define AQ_MBPS_DIVISOR 125000 /* 1000000 / 8 */
......@@ -65,6 +67,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
bool op64bit;
u32 priv_data_len;
};
......@@ -330,6 +333,8 @@ struct aq_hw_ops {
int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*hw_get_mac_temp)(struct aq_hw_s *self, u32 *temp);
};
struct aq_fw_ops {
......@@ -352,6 +357,8 @@ struct aq_fw_ops {
int (*update_stats)(struct aq_hw_s *self);
int (*get_mac_temp)(struct aq_hw_s *self, int *temp);
int (*get_phy_temp)(struct aq_hw_s *self, int *temp);
u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_hw_utils.c: Definitions of helper functions used across
......@@ -9,6 +10,9 @@
*/
#include "aq_hw_utils.h"
#include <linux/io-64-nonatomic-lo-hi.h>
#include "aq_hw.h"
#include "aq_nic.h"
......@@ -37,9 +41,8 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
{
u32 value = readl(hw->mmio + reg);
if ((~0U) == value &&
(~0U) == readl(hw->mmio +
hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr))
if (value == U32_MAX &&
readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
return value;
......@@ -56,13 +59,28 @@ void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
*/
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
{
u64 value = aq_hw_read_reg(hw, reg);
u64 value = U64_MAX;
value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32;
if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
value = readq(hw->mmio + reg);
else
value = lo_hi_readq(hw->mmio + reg);
if (value == U64_MAX &&
readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
return value;
}
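/* As with the read path above, op64bit picks native readq()/writeq() over the
 * lo_hi split helpers. In this series A0/B0 declare op64bit = false and ATL2
 * sets it to true (see the board capability tables further down).
 */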
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
{
if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
writeq(value, hw->mmio + reg);
else
lo_hi_writeq(value, hw->mmio + reg);
}
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_hw_utils.h: Declaration of helper functions used across hardware
......@@ -33,6 +34,7 @@ u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
......
......@@ -371,7 +371,7 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->features = aq_hw_caps->hw_features;
self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_RXHASH | NETIF_F_SG |
NETIF_F_LRO | NETIF_F_TSO;
NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6;
self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
......@@ -907,13 +907,13 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
aq_vec && self->aq_vecs > i;
++i, aq_vec = self->aq_vec[i]) {
data += count;
aq_vec_get_sw_stats(aq_vec, tc, data, &count);
count = aq_vec_get_sw_stats(aq_vec, tc, data);
}
}
data += count;
err_exit:;
err_exit:
return data;
}
......
......@@ -81,6 +81,8 @@ struct aq_ptp_s {
bool extts_pin_enabled;
u64 last_sync1588_ts;
bool a1_ptp;
};
struct ptp_tm_offset {
......@@ -782,8 +784,10 @@ int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
ring, frags);
if (err >= 0) {
u64_stats_update_begin(&ring->stats.tx.syncp);
++ring->stats.tx.packets;
ring->stats.tx.bytes += skb->len;
u64_stats_update_end(&ring->stats.tx.syncp);
}
} else {
err = NETDEV_TX_BUSY;
......@@ -844,7 +848,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
if (!aq_ptp)
return 0;
err = aq_ring_init(&aq_ptp->ptp_tx);
err = aq_ring_init(&aq_ptp->ptp_tx, ATL_RING_TX);
if (err < 0)
goto err_exit;
err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
......@@ -853,7 +857,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
if (err < 0)
goto err_exit;
err = aq_ring_init(&aq_ptp->ptp_rx);
err = aq_ring_init(&aq_ptp->ptp_rx, ATL_RING_RX);
if (err < 0)
goto err_exit;
err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
......@@ -871,7 +875,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
if (err < 0)
goto err_rx_free;
err = aq_ring_init(&aq_ptp->hwts_rx);
err = aq_ring_init(&aq_ptp->hwts_rx, ATL_RING_RX);
if (err < 0)
goto err_rx_free;
err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
......@@ -945,21 +949,6 @@ void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
aq_ring_rx_deinit(&aq_ptp->ptp_rx);
}
#define PTP_8TC_RING_IDX 8
#define PTP_4TC_RING_IDX 16
#define PTP_HWST_RING_IDX 31
/* Index must be 8 (8 TCs) or 16 (4 TCs).
* It depends on Traffic Class mode.
*/
static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode)
{
if (tc_mode == AQ_TC_MODE_8TCS)
return PTP_8TC_RING_IDX;
return PTP_4TC_RING_IDX;
}
int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
......@@ -971,7 +960,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
if (!aq_ptp)
return 0;
tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
tx_ring_idx, &aq_nic->aq_nic_cfg);
......@@ -980,7 +969,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
goto err_exit;
}
rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
rx_ring_idx, &aq_nic->aq_nic_cfg);
......@@ -1172,11 +1161,17 @@ static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
{
bool a1_ptp = ATL_HW_IS_CHIP_FEATURE(aq_nic->aq_hw, ATLANTIC);
struct hw_atl_utils_mbox mbox;
struct ptp_clock *clock;
struct aq_ptp_s *aq_ptp;
int err = 0;
if (!a1_ptp) {
aq_nic->aq_ptp = NULL;
return 0;
}
if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
aq_nic->aq_ptp = NULL;
return 0;
......@@ -1203,6 +1198,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
}
aq_ptp->aq_nic = aq_nic;
aq_ptp->a1_ptp = a1_ptp;
spin_lock_init(&aq_ptp->ptp_lock);
spin_lock_init(&aq_ptp->ptp_ring_lock);
......@@ -1393,4 +1389,36 @@ static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
schedule_delayed_work(&aq_ptp->poll_sync, timeout);
}
}
int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type)
{
if (!aq_nic->aq_ptp)
return 0;
/* Additional RX ring is allocated for PTP HWTS on A1 */
return (aq_nic->aq_ptp->a1_ptp && ring_type == ATL_RING_RX) ? 2 : 1;
}
u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data)
{
struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
unsigned int count = 0U;
if (!aq_ptp)
return data;
count = aq_ring_fill_stats_data(&aq_ptp->ptp_rx, data);
data += count;
count = aq_ring_fill_stats_data(&aq_ptp->ptp_tx, data);
data += count;
if (aq_ptp->a1_ptp) {
/* Only Receive ring for HWTS */
count = aq_ring_fill_stats_data(&aq_ptp->hwts_rx, data);
data += count;
}
return data;
}
#endif
/* SPDX-License-Identifier: GPL-2.0-only */
/* Aquantia Corporation Network Driver
* Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_ptp.h: Declaration of PTP functions.
......@@ -10,6 +12,23 @@
#include <linux/net_tstamp.h>
#include "aq_ring.h"
#define PTP_8TC_RING_IDX 8
#define PTP_4TC_RING_IDX 16
#define PTP_HWST_RING_IDX 31
/* Index must be 8 (8 TCs) or 16 (4 TCs).
* It depends on Traffic Class mode.
*/
static inline unsigned int aq_ptp_ring_idx(const enum aq_tc_mode tc_mode)
{
if (tc_mode == AQ_TC_MODE_8TCS)
return PTP_8TC_RING_IDX;
return PTP_4TC_RING_IDX;
}
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
/* Common functions */
......@@ -55,6 +74,10 @@ struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
int aq_ptp_link_change(struct aq_nic_s *aq_nic);
/* PTP ring statistics */
int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type);
u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data);
#else
static inline int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
......
......@@ -70,24 +70,35 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
(PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
*/
aq_free_rxpage(&rxbuf->rxdata,
aq_nic_get_dev(self->aq_nic));
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_losts++;
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
rxbuf->rxdata.pg_off = 0;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
}
}
if (!rxbuf->rxdata.page) {
ret = aq_get_rxpage(&rxbuf->rxdata, order,
aq_nic_get_dev(self->aq_nic));
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
u64_stats_update_end(&self->stats.rx.syncp);
}
return ret;
}
......@@ -206,11 +217,17 @@ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
return self;
}
int aq_ring_init(struct aq_ring_s *self)
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
{
self->hw_head = 0;
self->sw_head = 0;
self->sw_tail = 0;
self->ring_type = ring_type;
if (self->ring_type == ATL_RING_RX)
u64_stats_init(&self->stats.rx.syncp);
else
u64_stats_init(&self->stats.tx.syncp);
return 0;
}
......@@ -238,7 +255,9 @@ void aq_ring_queue_wake(struct aq_ring_s *ring)
ring->idx))) {
netif_wake_subqueue(ndev,
AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
u64_stats_update_begin(&ring->stats.tx.syncp);
ring->stats.tx.queue_restarts++;
u64_stats_update_end(&ring->stats.tx.syncp);
}
}
......@@ -280,8 +299,10 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
if (unlikely(buff->is_eop)) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
dev_kfree_skb_any(buff->skb);
}
......@@ -301,7 +322,9 @@ static void aq_rx_checksum(struct aq_ring_s *self,
return;
if (unlikely(buff->is_cso_err)) {
u64_stats_update_begin(&self->stats.rx.syncp);
++self->stats.rx.errors;
u64_stats_update_end(&self->stats.rx.syncp);
skb->ip_summed = CHECKSUM_NONE;
return;
}
......@@ -371,13 +394,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff_->is_cleaned = true;
} while (!buff_->is_eop);
u64_stats_update_begin(&self->stats.rx.syncp);
++self->stats.rx.errors;
u64_stats_update_end(&self->stats.rx.syncp);
continue;
}
}
if (buff->is_error) {
u64_stats_update_begin(&self->stats.rx.syncp);
++self->stats.rx.errors;
u64_stats_update_end(&self->stats.rx.syncp);
continue;
}
......@@ -392,6 +419,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
skb = build_skb(aq_buf_vaddr(&buff->rxdata),
AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.skb_alloc_fails++;
u64_stats_update_end(&self->stats.rx.syncp);
err = -ENOMEM;
goto err_exit;
}
......@@ -405,6 +435,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
} else {
skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
if (unlikely(!skb)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.skb_alloc_fails++;
u64_stats_update_end(&self->stats.rx.syncp);
err = -ENOMEM;
goto err_exit;
}
......@@ -478,8 +511,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
: AQ_NIC_RING2QMAP(self->aq_nic,
self->idx));
u64_stats_update_begin(&self->stats.rx.syncp);
++self->stats.rx.packets;
self->stats.rx.bytes += skb->len;
u64_stats_update_end(&self->stats.rx.syncp);
napi_gro_receive(napi, skb);
}
......@@ -538,7 +573,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
if (!self)
goto err_exit;
return;
for (; self->sw_head != self->sw_tail;
self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
......@@ -546,14 +581,12 @@ void aq_ring_rx_deinit(struct aq_ring_s *self)
aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
}
err_exit:;
}
void aq_ring_free(struct aq_ring_s *self)
{
if (!self)
goto err_exit;
return;
kfree(self->buff_ring);
......@@ -561,6 +594,35 @@ void aq_ring_free(struct aq_ring_s *self)
dma_free_coherent(aq_nic_get_dev(self->aq_nic),
self->size * self->dx_size, self->dx_ring,
self->dx_ring_pa);
}
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
unsigned int count;
unsigned int start;
if (self->ring_type == ATL_RING_RX) {
/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
do {
count = 0;
start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
data[count] = self->stats.rx.packets;
data[++count] = self->stats.rx.jumbo_packets;
data[++count] = self->stats.rx.lro_packets;
data[++count] = self->stats.rx.errors;
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
do {
count = 0;
start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
data[count] = self->stats.tx.packets;
data[++count] = self->stats.tx.queue_restarts;
} while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
}
err_exit:;
return ++count;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
......@@ -88,17 +89,22 @@ struct __packed aq_ring_buff_s {
};
struct aq_ring_stats_rx_s {
struct u64_stats_sync syncp; /* must be first */
u64 errors;
u64 packets;
u64 bytes;
u64 lro_packets;
u64 jumbo_packets;
u64 alloc_fails;
u64 skb_alloc_fails;
u64 polls;
u64 pg_losts;
u64 pg_flips;
u64 pg_reuses;
};
struct aq_ring_stats_tx_s {
struct u64_stats_sync syncp; /* must be first */
u64 errors;
u64 packets;
u64 bytes;
......@@ -110,6 +116,11 @@ union aq_ring_stats_s {
struct aq_ring_stats_tx_s tx;
};
enum atl_ring_type {
ATL_RING_TX,
ATL_RING_RX,
};
struct aq_ring_s {
struct aq_ring_buff_s *buff_ring;
u8 *dx_ring; /* descriptors ring, dma shared mem */
......@@ -124,6 +135,7 @@ struct aq_ring_s {
unsigned int page_order;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
enum atl_ring_type ring_type;
};
struct aq_ring_param_s {
......@@ -163,7 +175,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic,
unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
int aq_ring_init(struct aq_ring_s *self);
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
void aq_ring_update_queue_state(struct aq_ring_s *ring);
......@@ -181,4 +193,6 @@ struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
unsigned int size, unsigned int dx_size);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
#endif /* AQ_RING_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
......@@ -44,6 +45,9 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
} else {
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
if (self->aq_hw_ops->hw_ring_tx_head_update) {
err = self->aq_hw_ops->hw_ring_tx_head_update(
self->aq_hw,
......@@ -180,7 +184,7 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
for (i = 0U, ring = self->ring[0];
self->tx_rings > i; ++i, ring = self->ring[i]) {
err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
......@@ -190,7 +194,7 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
if (err < 0)
goto err_exit;
err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX);
if (err < 0)
goto err_exit;
......@@ -349,59 +353,23 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
return &self->aq_ring_param.affinity_mask;
}
static void aq_vec_add_stats(struct aq_vec_s *self,
const unsigned int tc,
struct aq_ring_stats_rx_s *stats_rx,
struct aq_ring_stats_tx_s *stats_tx)
bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
{
struct aq_ring_s *ring = self->ring[tc];
if (tc < self->rx_rings) {
struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
stats_rx->packets += rx->packets;
stats_rx->bytes += rx->bytes;
stats_rx->errors += rx->errors;
stats_rx->jumbo_packets += rx->jumbo_packets;
stats_rx->lro_packets += rx->lro_packets;
stats_rx->pg_losts += rx->pg_losts;
stats_rx->pg_flips += rx->pg_flips;
stats_rx->pg_reuses += rx->pg_reuses;
}
if (tc < self->tx_rings) {
struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
stats_tx->errors += tx->errors;
stats_tx->queue_restarts += tx->queue_restarts;
}
return tc < self->rx_rings && tc < self->tx_rings;
}
int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
unsigned int *p_count)
unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
{
struct aq_ring_stats_rx_s stats_rx;
struct aq_ring_stats_tx_s stats_tx;
unsigned int count = 0U;
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
unsigned int count;
aq_vec_add_stats(self, tc, &stats_rx, &stats_tx);
/* This data should mimic aq_ethtool_queue_stat_names structure
*/
data[count] += stats_rx.packets;
data[++count] += stats_tx.packets;
data[++count] += stats_tx.queue_restarts;
data[++count] += stats_rx.jumbo_packets;
data[++count] += stats_rx.lro_packets;
data[++count] += stats_rx.errors;
WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
"Invalid tc %u (#rx=%u, #tx=%u)\n",
tc, self->rx_rings, self->tx_rings);
if (!aq_vec_is_valid_tc(self, tc))
return 0;
if (p_count)
*p_count = ++count;
count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);
return 0;
return count;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* aQuantia Corporation Network Driver
* Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
/* Atlantic Network Driver
*
* Copyright (C) 2014-2019 aQuantia Corporation
* Copyright (C) 2019-2020 Marvell International Ltd.
*/
/* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings.
......@@ -35,7 +36,7 @@ void aq_vec_ring_free(struct aq_vec_s *self);
int aq_vec_start(struct aq_vec_s *self);
void aq_vec_stop(struct aq_vec_s *self);
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
unsigned int *p_count);
bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc);
unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data);
#endif /* AQ_VEC_H */
......@@ -18,6 +18,7 @@
#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
.is_64_dma = true, \
.op64bit = false, \
.msix_irqs = 4U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_A0_RSS_MAX, \
......@@ -37,7 +38,9 @@
NETIF_F_RXHASH | \
NETIF_F_RXCSUM | \
NETIF_F_SG | \
NETIF_F_TSO, \
NETIF_F_TSO | \
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_A0_MTU_JUMBO, \
......@@ -329,6 +332,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
err = -EINVAL;
goto err_exit;
}
h = (mac_addr[0] << 8) | (mac_addr[1]);
l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
......@@ -355,7 +359,6 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
int err = 0;
hw_atl_a0_hw_init_tx_path(self);
hw_atl_a0_hw_init_rx_path(self);
......@@ -751,6 +754,7 @@ static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
unsigned int packet_filter)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int i = 0U;
hw_atl_rpfl2promiscuous_mode_en_set(self,
......@@ -759,14 +763,13 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
IS_FILTER_ENABLED(IFF_MULTICAST), 0);
hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
self->aq_nic_cfg->is_mc_list_enabled =
IS_FILTER_ENABLED(IFF_MULTICAST);
cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled &&
(i <= self->aq_nic_cfg->mc_list_count)) ?
1U : 0U, i);
(cfg->is_mc_list_enabled &&
(i <= cfg->mc_list_count)) ? 1U : 0U,
i);
return aq_hw_err_from_flags(self);
}
......@@ -779,16 +782,15 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
[ETH_ALEN],
u32 count)
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
int err = 0;
if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
err = EBADRQC;
goto err_exit;
}
for (self->aq_nic_cfg->mc_list_count = 0U;
self->aq_nic_cfg->mc_list_count < count;
++self->aq_nic_cfg->mc_list_count) {
u32 i = self->aq_nic_cfg->mc_list_count;
for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) {
u32 i = cfg->mc_list_count;
u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
(ar_mac[i][4] << 8) | ar_mac[i][5];
......@@ -804,7 +806,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
HW_ATL_A0_MAC_MIN + i);
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled),
(cfg->is_mc_list_enabled),
HW_ATL_A0_MAC_MIN + i);
}
......@@ -885,6 +887,63 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_fl3l4_clear(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data)
{
u8 location = data->location;
if (!data->is_ipv6) {
hw_atl_rpfl3l4_cmd_clear(self, location);
hw_atl_rpf_l4_spd_set(self, 0U, location);
hw_atl_rpf_l4_dpd_set(self, 0U, location);
hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
} else {
int i;
for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
hw_atl_rpfl3l4_cmd_clear(self, location + i);
hw_atl_rpf_l4_spd_set(self, 0U, location + i);
hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
}
hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
}
return aq_hw_err_from_flags(self);
}
static int hw_atl_a0_hw_fl3l4_set(struct aq_hw_s *self,
struct aq_rx_filter_l3l4 *data)
{
u8 location = data->location;
hw_atl_a0_hw_fl3l4_clear(self, data);
if (data->cmd) {
if (!data->is_ipv6) {
hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
location,
data->ip_dst[0]);
hw_atl_rpfl3l4_ipv4_src_addr_set(self,
location,
data->ip_src[0]);
} else {
hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
location,
data->ip_dst);
hw_atl_rpfl3l4_ipv6_src_addr_set(self,
location,
data->ip_src);
}
}
hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
hw_atl_rpf_l4_spd_set(self, data->p_src, location);
hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
return aq_hw_err_from_flags(self);
}
const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_soft_reset = hw_atl_utils_soft_reset,
.hw_prepare = hw_atl_utils_initfw,
......@@ -911,6 +970,7 @@ const struct aq_hw_ops hw_atl_ops_a0 = {
.hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init,
.hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init,
.hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set,
.hw_filter_l3l4_set = hw_atl_a0_hw_fl3l4_set,
.hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_a0_hw_rss_set,
......
......@@ -20,6 +20,7 @@
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
.is_64_dma = true, \
.op64bit = false, \
.msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_B0_RSS_MAX, \
......@@ -40,6 +41,7 @@
NETIF_F_RXHASH | \
NETIF_F_SG | \
NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_LRO | \
NETIF_F_NTUPLE | \
NETIF_F_HW_VLAN_CTAG_FILTER | \
......@@ -54,8 +56,6 @@
.mac_regs_count = 88, \
.hw_alive_check_addr = 0x10U
#define FRAC_PER_NS 0x100000000LL
const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
.media_type = AQ_HW_MEDIA_TYPE_FIBRE,
......@@ -1233,7 +1233,7 @@ static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
if (base_ns != nsi * NSEC_PER_SEC) {
s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
base_ns - nsi * NSEC_PER_SEC);
nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
nsi_frac = div64_s64(AQ_FRAC_PER_NS * NSEC_PER_SEC, divisor);
}
*ns = (u32)nsi;
......@@ -1246,23 +1246,23 @@ hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
{
s64 adj_fns_val;
s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
FRAC_PER_NS * ptp_adj_freq->ns_phy);
AQ_FRAC_PER_NS * ptp_adj_freq->ns_phy);
s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
FRAC_PER_NS * ptp_adj_freq->ns_mac);
s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
AQ_FRAC_PER_NS * ptp_adj_freq->ns_mac);
s64 fault_in_sec_phy = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
s64 fault_in_sec_mac = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
/* MAC MCP counter freq is macfreq / 4 */
s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
4 * FRAC_PER_NS;
4 * AQ_FRAC_PER_NS;
diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
AQ_HW_MAC_COUNTER_HZ);
adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
adj_fns_val = (ptp_adj_freq->fns_mac + AQ_FRAC_PER_NS *
ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, AQ_FRAC_PER_NS);
ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
FRAC_PER_NS;
AQ_FRAC_PER_NS;
}
static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
......@@ -1581,6 +1581,48 @@ int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
return 0;
}
static u32 hw_atl_b0_ts_ready_and_latch_high_get(struct aq_hw_s *self)
{
if (hw_atl_ts_ready_get(self) && hw_atl_ts_ready_latch_high_get(self))
return 1;
return 0;
}
static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
{
bool ts_disabled;
int err;
u32 val;
u32 ts;
ts_disabled = (hw_atl_ts_power_down_get(self) == 1U);
if (ts_disabled) {
// Set AFE Temperature Sensor to on (off by default)
hw_atl_ts_power_down_set(self, 0U);
// Reset internal capacitors, biasing, and counters
hw_atl_ts_reset_set(self, 1);
hw_atl_ts_reset_set(self, 0);
}
err = readx_poll_timeout_atomic(hw_atl_b0_ts_ready_and_latch_high_get,
self, val, val == 1, 10000U, 500000U);
if (err)
return err;
ts = hw_atl_ts_data_get(self);
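/* Convert the raw thermal sensor reading to millidegrees Celsius (quadratic fit) */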
*temp = ts * ts * 16 / 100000 + 60 * ts - 83410;
if (ts_disabled) {
// Set AFE Temperature Sensor back to off
hw_atl_ts_power_down_set(self, 1U);
}
return 0;
}
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_soft_reset = hw_atl_utils_soft_reset,
.hw_prepare = hw_atl_utils_initfw,
......@@ -1637,4 +1679,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_offload = hw_atl_b0_hw_offload_set,
.hw_set_loopback = hw_atl_b0_set_loopback,
.hw_set_fc = hw_atl_b0_set_fc,
.hw_get_mac_temp = hw_atl_b0_get_mac_temp,
};
......@@ -13,6 +13,50 @@
#include "hw_atl_llh_internal.h"
#include "../aq_hw_utils.h"
void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TS_RESET_ADR,
HW_ATL_TS_RESET_MSK,
HW_ATL_TS_RESET_SHIFT,
val);
}
void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val)
{
aq_hw_write_reg_bit(aq_hw, HW_ATL_TS_POWER_DOWN_ADR,
HW_ATL_TS_POWER_DOWN_MSK,
HW_ATL_TS_POWER_DOWN_SHIFT,
val);
}
u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_POWER_DOWN_ADR,
HW_ATL_TS_POWER_DOWN_MSK,
HW_ATL_TS_POWER_DOWN_SHIFT);
}
u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_READY_ADR,
HW_ATL_TS_READY_MSK,
HW_ATL_TS_READY_SHIFT);
}
u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_READY_LATCH_HIGH_ADR,
HW_ATL_TS_READY_LATCH_HIGH_MSK,
HW_ATL_TS_READY_LATCH_HIGH_SHIFT);
}
u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_DATA_OUT_ADR,
HW_ATL_TS_DATA_OUT_MSK,
HW_ATL_TS_DATA_OUT_SHIFT);
}
/* global */
void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
u32 semaphore)
......
......@@ -16,6 +16,24 @@
struct aq_hw_s;
/* set temperature sense reset */
void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val);
/* set temperature sense power down */
void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val);
/* get temperature sense power down */
u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw);
/* get temperature sense ready */
u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw);
/* get temperature sense ready latch high */
u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw);
/* get temperature sense data */
u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw);
/* global */
/* set global microprocessor semaphore */
......
......@@ -12,6 +12,36 @@
#ifndef HW_ATL_LLH_INTERNAL_H
#define HW_ATL_LLH_INTERNAL_H
/* COM Temperature Sense Reset Bitfield Definitions */
#define HW_ATL_TS_RESET_ADR 0x00003100
#define HW_ATL_TS_RESET_MSK 0x00000004
#define HW_ATL_TS_RESET_SHIFT 2
#define HW_ATL_TS_RESET_WIDTH 1
/* COM Temperature Sense Power Down Bitfield Definitions */
#define HW_ATL_TS_POWER_DOWN_ADR 0x00003100
#define HW_ATL_TS_POWER_DOWN_MSK 0x00000001
#define HW_ATL_TS_POWER_DOWN_SHIFT 0
#define HW_ATL_TS_POWER_DOWN_WIDTH 1
/* COM Temperature Sense Ready Bitfield Definitions */
#define HW_ATL_TS_READY_ADR 0x00003120
#define HW_ATL_TS_READY_MSK 0x80000000
#define HW_ATL_TS_READY_SHIFT 31
#define HW_ATL_TS_READY_WIDTH 1
/* COM Temperature Sense Ready Latch High Bitfield Definitions */
#define HW_ATL_TS_READY_LATCH_HIGH_ADR 0x00003120
#define HW_ATL_TS_READY_LATCH_HIGH_MSK 0x40000000
#define HW_ATL_TS_READY_LATCH_HIGH_SHIFT 30
#define HW_ATL_TS_READY_LATCH_HIGH_WIDTH 1
/* COM Temperature Sense Data Out [B:0] Bitfield Definitions */
#define HW_ATL_TS_DATA_OUT_ADR 0x00003120
#define HW_ATL_TS_DATA_OUT_MSK 0x00000FFF
#define HW_ATL_TS_DATA_OUT_SHIFT 0
#define HW_ATL_TS_DATA_OUT_WIDTH 12
/* global microprocessor semaphore definitions
* base address: 0x000003a0
* parameter: semaphore {s} | stride size 0x4 | range [0, 15]
......
......@@ -1066,6 +1066,7 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_state = hw_atl_utils_mpi_set_state,
.update_link_status = hw_atl_utils_mpi_get_link_status,
.update_stats = hw_atl_utils_update_stats,
.get_mac_temp = NULL,
.get_phy_temp = NULL,
.set_power = aq_fw1x_set_power,
.set_eee_rate = NULL,
......
......@@ -353,7 +353,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
/* Convert PHY temperature from 1/256 degree Celsius
* to 1/1000 degree Celsius.
*/
*temp = (temp_res & 0xFFFF) * 1000 / 256;
*temp = (int16_t)(temp_res & 0xFFFF) * 1000 / 256;
return 0;
}
......@@ -681,6 +681,7 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
.update_stats = aq_fw2x_update_stats,
.get_mac_temp = NULL,
.get_phy_temp = aq_fw2x_get_phy_temp,
.set_power = aq_fw2x_set_power,
.set_eee_rate = aq_fw2x_set_eee_rate,
......
......@@ -21,6 +21,7 @@ static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
#define DEFAULT_BOARD_BASIC_CAPABILITIES \
.is_64_dma = true, \
.op64bit = true, \
.msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL2_RSS_MAX, \
......
......@@ -379,6 +379,25 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
return 0;
}
static int aq_a2_fw_get_phy_temp(struct aq_hw_s *self, int *temp)
{
struct phy_health_monitor_s phy_health_monitor;
hw_atl2_shared_buffer_read_safe(self, phy_health_monitor,
&phy_health_monitor);
*temp = (int8_t)phy_health_monitor.phy_temperature * 1000;
return 0;
}
static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
{
/* There's only one temperature sensor on A2, use it for
* both MAC and PHY.
*/
return aq_a2_fw_get_phy_temp(self, temp);
}
static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
struct link_options_s link_options;
......@@ -510,6 +529,8 @@ const struct aq_fw_ops aq_a2_fw_ops = {
.set_state = aq_a2_fw_set_state,
.update_link_status = aq_a2_fw_update_link_status,
.update_stats = aq_a2_fw_update_stats,
.get_mac_temp = aq_a2_fw_get_mac_temp,
.get_phy_temp = aq_a2_fw_get_phy_temp,
.set_eee_rate = aq_a2_fw_set_eee_rate,
.get_eee_rate = aq_a2_fw_get_eee_rate,
.set_flow_control = aq_a2_fw_set_flow_control,
......