Commit a7741713 authored by David S. Miller

Merge branch 'thunderx-fixes'

Sunil Goutham says:

====================
net: thunderx: Miscellaneous fixes

This patchset includes fixes for incorrect LMAC credits,
unreliable driver statistics, a memory leak on interface
down, and other small issues.

Changes from v1:
- As suggested, replaced bit shifting with the BIT() macro
  in the patch 'Fix configuration of L3/L4 length checking'.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b71de936 c94acf80
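
For reference on the v1 change mentioned above: BIT(n) from include/linux/bits.h expands to (1UL << (n)), so the checksum-enable value written in the RQ config further down can be read either way. A minimal sketch (the macro name RQ_GEN_CFG_CSUM_BITS is illustrative only; the driver writes the expression inline):

/* BIT(nr) is (1UL << (nr)); BIT(24) | BIT(23) | BIT(21) == 0x1A00000 */
#define RQ_GEN_CFG_CSUM_BITS (BIT(24) | BIT(23) | BIT(21))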
@@ -47,7 +47,7 @@
/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
-#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */
/* Max pkinds */
#define NIC_MAX_PKIND 16
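
The two definitions are consistent with each other: 9190 bytes plus VLAN_ETH_HLEN (18) plus ETH_FCS_LEN (4) plus 4 bytes of extra headroom (presumably for a second VLAN tag) gives exactly the 9216-byte hardware ceiling, i.e. 9190 + 18 + 4 + 4 = 9216. This is the same adjustment nic_update_hw_frs() and nic_init_hw() apply below.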
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
struct nicvf_hw_stats {
u64 rx_bytes;
+u64 rx_frames;
u64 rx_ucast_frames;
u64 rx_bcast_frames;
u64 rx_mcast_frames;
-u64 rx_fcs_errors;
-u64 rx_l2_errors;
+u64 rx_drops;
u64 rx_drop_red;
u64 rx_drop_red_bytes;
u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
+u64 rx_fcs_errors;
+u64 rx_l2_errors;
+u64 tx_bytes;
+u64 tx_frames;
+u64 tx_ucast_frames;
+u64 tx_bcast_frames;
+u64 tx_mcast_frames;
+u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+/* CQE Rx errs */
u64 rx_bgx_truncated_pkts;
u64 rx_jabber_errs;
u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
u64 rx_l4_pclp;
u64 rx_truncated_pkts;
-u64 tx_bytes_ok;
-u64 tx_ucast_frames_ok;
-u64 tx_bcast_frames_ok;
-u64 tx_mcast_frames_ok;
-u64 tx_drops;
-};
-struct nicvf_drv_stats {
-/* Rx */
-u64 rx_frames_ok;
-u64 rx_frames_64;
-u64 rx_frames_127;
-u64 rx_frames_255;
-u64 rx_frames_511;
-u64 rx_frames_1023;
-u64 rx_frames_1518;
-u64 rx_frames_jumbo;
-u64 rx_drops;
+/* CQE Tx errs */
+u64 tx_desc_fault;
+u64 tx_hdr_cons_err;
+u64 tx_subdesc_err;
+u64 tx_max_size_exceeded;
+u64 tx_imm_size_oflow;
+u64 tx_data_seq_err;
+u64 tx_mem_seq_err;
+u64 tx_lock_viol;
+u64 tx_data_fault;
+u64 tx_tstmp_conflict;
+u64 tx_tstmp_timeout;
+u64 tx_mem_fault;
+u64 tx_csum_overlap;
+u64 tx_csum_overflow;
+/* driver debug stats */
u64 rcv_buffer_alloc_failures;
-/* Tx */
-u64 tx_frames_ok;
-u64 tx_drops;
u64 tx_tso;
u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;
+struct u64_stats_sync syncp;
};
struct nicvf {
@@ -282,7 +291,6 @@ struct nicvf {
u8 node;
u8 cpi_alg;
u16 mtu;
bool link_up;
u8 duplex;
u32 speed;
@@ -298,7 +306,7 @@ struct nicvf {
/* Stats */
struct nicvf_hw_stats hw_stats;
-struct nicvf_drv_stats drv_stats;
+struct nicvf_drv_stats __percpu *drv_stats;
struct bgx_stats bgx_stats;
/* MSI-X */
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
+#include <linux/if_vlan.h>
#include "nic_reg.h"
#include "nic.h"
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
-if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
-dev_err(&nic->pdev->dev,
-"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
-vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+int bgx, lmac, lmac_cnt;
+u64 lmac_credits;
+
+if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
return 1;
-}
-new_frs += ETH_HLEN;
-if (new_frs <= nic->pkind.maxlen)
-return 0;
-nic->pkind.maxlen = new_frs;
-nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+lmac += bgx * MAX_LMAC_PER_BGX;
+
+new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+
+/* Update corresponding LMAC credits */
+lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
+lmac_credits &= ~(0xFFFFFULL << 12);
+lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
+nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
+
+/* Enforce MTU in HW
+ * This config is supported only from 88xx pass 2.0 onwards.
+ */
+if (!pass1_silicon(nic->pdev))
+nic_reg_write(nic,
+NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
return 0;
}
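
As a worked example of the credit update above, assume a fully populated BGX with 4 LMACs and a maximum-size frame: the 48 KB of shared TX FIFO gives (48 * 1024) / 4 = 12288 bytes per LMAC; reserving one 9216-byte frame leaves 3072 bytes, and since credits are in 16-byte units this programs 3072 / 16 = 192 into the 20-bit field at bits 31:12 of NIC_PF_LMAC_0_7_CREDIT. The ~(0xFFFFFULL << 12) mask clears exactly that field before the new value is OR-ed in.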
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic)
/* PKIND configuration */
nic->pkind.minlen = 0;
-nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
nic->pkind.lenerr_en = 1;
nic->pkind.rx_hdr = 0;
nic->pkind.hdr_sl = 0;
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
nic_reg_write(nic, reg_addr, 0);
}
}
+return 0;
}
@@ -106,6 +106,7 @@
#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_CFG2 (0x240100)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
@@ -36,11 +36,11 @@ struct nicvf_stat {
static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_bytes),
+NICVF_HW_STAT(rx_frames),
NICVF_HW_STAT(rx_ucast_frames),
NICVF_HW_STAT(rx_bcast_frames),
NICVF_HW_STAT(rx_mcast_frames),
-NICVF_HW_STAT(rx_fcs_errors),
-NICVF_HW_STAT(rx_l2_errors),
+NICVF_HW_STAT(rx_drops),
NICVF_HW_STAT(rx_drop_red),
NICVF_HW_STAT(rx_drop_red_bytes),
NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_drop_mcast),
NICVF_HW_STAT(rx_drop_l3_bcast),
NICVF_HW_STAT(rx_drop_l3_mcast),
-NICVF_HW_STAT(rx_bgx_truncated_pkts),
-NICVF_HW_STAT(rx_jabber_errs),
-NICVF_HW_STAT(rx_fcs_errs),
-NICVF_HW_STAT(rx_bgx_errs),
-NICVF_HW_STAT(rx_prel2_errs),
-NICVF_HW_STAT(rx_l2_hdr_malformed),
-NICVF_HW_STAT(rx_oversize),
-NICVF_HW_STAT(rx_undersize),
-NICVF_HW_STAT(rx_l2_len_mismatch),
-NICVF_HW_STAT(rx_l2_pclp),
-NICVF_HW_STAT(rx_ip_ver_errs),
-NICVF_HW_STAT(rx_ip_csum_errs),
-NICVF_HW_STAT(rx_ip_hdr_malformed),
-NICVF_HW_STAT(rx_ip_payload_malformed),
-NICVF_HW_STAT(rx_ip_ttl_errs),
-NICVF_HW_STAT(rx_l3_pclp),
-NICVF_HW_STAT(rx_l4_malformed),
-NICVF_HW_STAT(rx_l4_csum_errs),
-NICVF_HW_STAT(rx_udp_len_errs),
-NICVF_HW_STAT(rx_l4_port_errs),
-NICVF_HW_STAT(rx_tcp_flag_errs),
-NICVF_HW_STAT(rx_tcp_offset_errs),
-NICVF_HW_STAT(rx_l4_pclp),
-NICVF_HW_STAT(rx_truncated_pkts),
-NICVF_HW_STAT(tx_bytes_ok),
-NICVF_HW_STAT(tx_ucast_frames_ok),
-NICVF_HW_STAT(tx_bcast_frames_ok),
-NICVF_HW_STAT(tx_mcast_frames_ok),
+NICVF_HW_STAT(rx_fcs_errors),
+NICVF_HW_STAT(rx_l2_errors),
+NICVF_HW_STAT(tx_bytes),
+NICVF_HW_STAT(tx_frames),
+NICVF_HW_STAT(tx_ucast_frames),
+NICVF_HW_STAT(tx_bcast_frames),
+NICVF_HW_STAT(tx_mcast_frames),
NICVF_HW_STAT(tx_drops),
};
static const struct nicvf_stat nicvf_drv_stats[] = {
-NICVF_DRV_STAT(rx_frames_ok),
-NICVF_DRV_STAT(rx_frames_64),
-NICVF_DRV_STAT(rx_frames_127),
-NICVF_DRV_STAT(rx_frames_255),
-NICVF_DRV_STAT(rx_frames_511),
-NICVF_DRV_STAT(rx_frames_1023),
-NICVF_DRV_STAT(rx_frames_1518),
-NICVF_DRV_STAT(rx_frames_jumbo),
-NICVF_DRV_STAT(rx_drops),
+NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+NICVF_DRV_STAT(rx_jabber_errs),
+NICVF_DRV_STAT(rx_fcs_errs),
+NICVF_DRV_STAT(rx_bgx_errs),
+NICVF_DRV_STAT(rx_prel2_errs),
+NICVF_DRV_STAT(rx_l2_hdr_malformed),
+NICVF_DRV_STAT(rx_oversize),
+NICVF_DRV_STAT(rx_undersize),
+NICVF_DRV_STAT(rx_l2_len_mismatch),
+NICVF_DRV_STAT(rx_l2_pclp),
+NICVF_DRV_STAT(rx_ip_ver_errs),
+NICVF_DRV_STAT(rx_ip_csum_errs),
+NICVF_DRV_STAT(rx_ip_hdr_malformed),
+NICVF_DRV_STAT(rx_ip_payload_malformed),
+NICVF_DRV_STAT(rx_ip_ttl_errs),
+NICVF_DRV_STAT(rx_l3_pclp),
+NICVF_DRV_STAT(rx_l4_malformed),
+NICVF_DRV_STAT(rx_l4_csum_errs),
+NICVF_DRV_STAT(rx_udp_len_errs),
+NICVF_DRV_STAT(rx_l4_port_errs),
+NICVF_DRV_STAT(rx_tcp_flag_errs),
+NICVF_DRV_STAT(rx_tcp_offset_errs),
+NICVF_DRV_STAT(rx_l4_pclp),
+NICVF_DRV_STAT(rx_truncated_pkts),
+NICVF_DRV_STAT(tx_desc_fault),
+NICVF_DRV_STAT(tx_hdr_cons_err),
+NICVF_DRV_STAT(tx_subdesc_err),
+NICVF_DRV_STAT(tx_max_size_exceeded),
+NICVF_DRV_STAT(tx_imm_size_oflow),
+NICVF_DRV_STAT(tx_data_seq_err),
+NICVF_DRV_STAT(tx_mem_seq_err),
+NICVF_DRV_STAT(tx_lock_viol),
+NICVF_DRV_STAT(tx_data_fault),
+NICVF_DRV_STAT(tx_tstmp_conflict),
+NICVF_DRV_STAT(tx_tstmp_timeout),
+NICVF_DRV_STAT(tx_mem_fault),
+NICVF_DRV_STAT(tx_csum_overlap),
+NICVF_DRV_STAT(tx_csum_overflow),
NICVF_DRV_STAT(rcv_buffer_alloc_failures),
-NICVF_DRV_STAT(tx_frames_ok),
NICVF_DRV_STAT(tx_tso),
-NICVF_DRV_STAT(tx_drops),
NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct nicvf *nic = netdev_priv(netdev);
-int stat;
-int sqs;
+int stat, tmp_stats;
+int sqs, cpu;
nicvf_update_stats(nic);
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
for (stat = 0; stat < nicvf_n_hw_stats; stat++)
*(data++) = ((u64 *)&nic->hw_stats)
[nicvf_hw_stats[stat].index];
-for (stat = 0; stat < nicvf_n_drv_stats; stat++)
-*(data++) = ((u64 *)&nic->drv_stats)
+for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+tmp_stats = 0;
+for_each_possible_cpu(cpu)
+tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
[nicvf_drv_stats[stat].index];
+*(data++) = tmp_stats;
+}
nicvf_get_qset_stats(nic, stats, &data);
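
The drv_stats loop above folds the per-CPU copies of each counter into a single value by byte offset. A self-contained userspace sketch of the same fold (plain C, with a fixed array standing in for the kernel's percpu allocation; all names here are illustrative, not from the driver):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct drv_stats { uint64_t tx_tso; uint64_t txq_stop; };

static struct drv_stats percpu_stats[NR_CPUS]; /* stand-in for alloc_percpu() */

static uint64_t fold_stat(size_t offset)
{
	uint64_t sum = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += *(uint64_t *)((char *)&percpu_stats[cpu] + offset);
	return sum;
}

int main(void)
{
	percpu_stats[0].tx_tso = 3;
	percpu_stats[2].tx_tso = 5;
	printf("tx_tso = %llu\n",
	       (unsigned long long)fold_stat(offsetof(struct drv_stats, tx_tso)));
	return 0;
}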
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
return qidx;
}
-static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
-struct sk_buff *skb)
-{
-if (skb->len <= 64)
-nic->drv_stats.rx_frames_64++;
-else if (skb->len <= 127)
-nic->drv_stats.rx_frames_127++;
-else if (skb->len <= 255)
-nic->drv_stats.rx_frames_255++;
-else if (skb->len <= 511)
-nic->drv_stats.rx_frames_511++;
-else if (skb->len <= 1023)
-nic->drv_stats.rx_frames_1023++;
-else if (skb->len <= 1518)
-nic->drv_stats.rx_frames_1518++;
-else
-nic->drv_stats.rx_frames_jumbo++;
-}
/* The Cavium ThunderX network controller can *only* be found in SoCs
* containing the ThunderX ARM64 CPU implementation. All accesses to the device
* registers on this platform are implicitly strongly ordered with respect
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev,
static int nicvf_init_resources(struct nicvf *nic)
{
int err;
-union nic_mbx mbx = {};
-mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
/* Enable Qset */
nicvf_qset_config(nic, true);
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic)
return err;
}
-/* Send VF config done msg to PF */
-nicvf_write_to_mbx(nic, &mbx);
return 0;
}
static void nicvf_snd_pkt_handler(struct net_device *netdev,
-struct cmp_queue *cq,
struct cqe_send_t *cqe_tx,
int cqe_type, int budget,
unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
__func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
cqe_tx->sqe_ptr, hdr->subdesc_cnt);
-nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+nicvf_check_cqe_tx_errs(nic, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
if (skb) {
/* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
return;
}
-nicvf_set_rx_frame_cnt(nic, skb);
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +675,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
work_done++;
break;
case CQE_TYPE_SEND:
-nicvf_snd_pkt_handler(netdev, cq,
+nicvf_snd_pkt_handler(netdev,
(void *)cq_desc, CQE_TYPE_SEND,
budget, &tx_pkts, &tx_bytes);
tx_done++;
@@ -740,7 +712,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
-nic->drv_stats.txq_wake++;
+this_cpu_inc(nic->drv_stats->txq_wake);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
netif_tx_stop_queue(txq);
-nic->drv_stats.txq_stop++;
+this_cpu_inc(nic->drv_stats->txq_stop);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit ring full, stopping SQ%d\n",
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev)
return 0;
}
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
union nic_mbx mbx = {};
mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
mbx.frs.max_frs = mtu;
mbx.frs.vf_id = nic->vf_id;
return nicvf_send_msg_to_pf(nic, &mbx);
}
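
The body of nicvf_update_hw_max_frs() is unchanged; it is only moved above nicvf_open() so the open path can call it without a forward declaration (the old copy further down is removed).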
int nicvf_open(struct net_device *netdev)
{
-int err, qidx;
+int cpu, err, qidx;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
nic->mtu = netdev->mtu;
union nic_mbx mbx = {};
netif_carrier_off(netdev);
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev)
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
-/* Configure receive side scaling */
-if (!nic->sqs_mode)
+/* Configure receive side scaling and MTU */
+if (!nic->sqs_mode) {
nicvf_rss_init(nic);
+if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+goto cleanup;
+/* Clear percpu stats */
+for_each_possible_cpu(cpu)
+memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+sizeof(struct nicvf_drv_stats));
+}
err = nicvf_register_interrupts(nic);
if (err)
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev)
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
-nic->drv_stats.txq_stop = 0;
-nic->drv_stats.txq_wake = 0;
+/* Send VF config done msg to PF */
+mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+nicvf_write_to_mbx(nic, &mbx);
return 0;
cleanup:
@@ -1297,17 +1288,6 @@ int nicvf_open(struct net_device *netdev)
return err;
}
-static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
-{
-union nic_mbx mbx = {};
-mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
-mbx.frs.max_frs = mtu;
-mbx.frs.vf_id = nic->vf_id;
-return nicvf_send_msg_to_pf(nic, &mbx);
-}
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu < NIC_HW_MIN_FRS)
return -EINVAL;
-netdev->mtu = new_mtu;
if (!netif_running(netdev))
return 0;
if (nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
+netdev->mtu = new_mtu;
nic->mtu = new_mtu;
return 0;
}
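
Note the reordering: netdev->mtu is now committed only after nicvf_update_hw_max_frs() succeeds, so a request the PF rejects no longer leaves the software MTU out of sync with the hardware frame size.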
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
void nicvf_update_stats(struct nicvf *nic)
{
-int qidx;
+int qidx, cpu;
+u64 tmp_stats = 0;
struct nicvf_hw_stats *stats = &nic->hw_stats;
-struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+struct nicvf_drv_stats *drv_stats;
struct queue_set *qs = nic->qs;
#define GET_RX_STATS(reg) \
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic)
stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
-stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
-stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
-stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
-stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
stats->tx_drops = GET_TX_STATS(TX_DROP);
-drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
-stats->tx_bcast_frames_ok +
-stats->tx_mcast_frames_ok;
-drv_stats->rx_frames_ok = stats->rx_ucast_frames +
+/* On T88 pass 2.0, the dummy SQE added for TSO notification
+ * via CQE has 'dont_send' set. Hence the HW drops the packet
+ * pointed to by the dummy SQE, and the tx_drops counter gets
+ * incremented. Subtracting it from the tx_tso counter gives
+ * the exact tx_drops count.
+ */
+if (nic->t88 && nic->hw_tso) {
+for_each_possible_cpu(cpu) {
+drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+tmp_stats += drv_stats->tx_tso;
+}
+stats->tx_drops = tmp_stats - stats->tx_drops;
+}
+stats->tx_frames = stats->tx_ucast_frames +
+stats->tx_bcast_frames +
+stats->tx_mcast_frames;
+stats->rx_frames = stats->rx_ucast_frames +
stats->rx_bcast_frames +
stats->rx_mcast_frames;
-drv_stats->rx_drops = stats->rx_drop_red +
+stats->rx_drops = stats->rx_drop_red +
stats->rx_drop_overrun;
-drv_stats->tx_drops = stats->tx_drops;
/* Update RQ and SQ stats */
for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
-struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
nicvf_update_stats(nic);
stats->rx_bytes = hw_stats->rx_bytes;
-stats->rx_packets = drv_stats->rx_frames_ok;
-stats->rx_dropped = drv_stats->rx_drops;
+stats->rx_packets = hw_stats->rx_frames;
+stats->rx_dropped = hw_stats->rx_drops;
stats->multicast = hw_stats->rx_mcast_frames;
-stats->tx_bytes = hw_stats->tx_bytes_ok;
-stats->tx_packets = drv_stats->tx_frames_ok;
-stats->tx_dropped = drv_stats->tx_drops;
+stats->tx_bytes = hw_stats->tx_bytes;
+stats->tx_packets = hw_stats->tx_frames;
+stats->tx_dropped = hw_stats->tx_drops;
return stats;
}
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
netdev_warn(dev, "%s: Transmit timed out, resetting\n",
dev->name);
-nic->drv_stats.tx_timeout++;
+this_cpu_inc(nic->drv_stats->tx_timeout);
schedule_work(&nic->reset_task);
}
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_netdev;
}
+nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+if (!nic->drv_stats) {
+err = -ENOMEM;
+goto err_free_netdev;
+}
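
netdev_alloc_pcpu_stats() allocates one zeroed instance of the given type for every possible CPU and initializes each instance's u64_stats_sync. A sketch of the lifecycle this patch adopts (abridged from the surrounding code, not a complete probe path):

/* probe: allocate per-CPU stats, fail cleanly if out of memory */
nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
if (!nic->drv_stats)
	return -ENOMEM;

/* hot path: lockless increment of this CPU's copy */
this_cpu_inc(nic->drv_stats->tx_tso);

/* remove/teardown: release all per-CPU copies */
free_percpu(nic->drv_stats);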
err = nicvf_set_qset_resources(nic);
if (err)
goto err_free_netdev;
@@ -1648,6 +1649,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nicvf_unregister_interrupts(nic);
err_free_netdev:
pci_set_drvdata(pdev, NULL);
+if (nic->drv_stats)
+free_percpu(nic->drv_stats);
free_netdev(netdev);
err_release_regions:
pci_release_regions(pdev);
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev)
unregister_netdev(pnetdev);
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
+if (nic->drv_stats)
+free_percpu(nic->drv_stats);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
-nic->drv_stats.rcv_buffer_alloc_failures++;
+this_cpu_inc(nic->pnicvf->drv_stats->
+rcv_buffer_alloc_failures);
return -ENOMEM;
}
nic->rb_page_offset = 0;
@@ -270,7 +271,8 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
rbdr_idx, new_rb);
next_rbdr:
/* Re-enable RBDR interrupts only if buffer allocation is success */
-if (!nic->rb_alloc_fail && rbdr->enable)
+if (!nic->rb_alloc_fail && rbdr->enable &&
+netif_running(nic->pnicvf->netdev))
nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
if (rbdr_idx)
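
The added netif_running() check keeps the refill path from re-arming the RBDR interrupt while the interface is being brought down, where a late interrupt could otherwise race with nicvf_stop() tearing the queues down.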
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct sk_buff *skb;
if (!sq)
return;
if (!sq->dmem.base)
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
+/* Free pending skbs in the queue */
+smp_rmb();
+while (sq->head != sq->tail) {
+skb = (struct sk_buff *)sq->skbuff[sq->head];
+if (skb)
+dev_kfree_skb_any(skb);
+sq->head++;
+sq->head &= (sq->dmem.q_len - 1);
+}
kfree(sq->skbuff);
nicvf_free_q_desc_mem(nic, &sq->dmem);
}
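
This drain loop is the fix for the interface-down memory leak mentioned in the cover letter: any skb still sitting between head and tail when the send queue is freed is now released. The masking assumes q_len is a power of two, and the smp_rmb() orders the head/tail reads against the producer's earlier writes to skbuff[].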
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
{
union nic_mbx mbx = {};
-/* Reset all RXQ's stats */
+/* Reset all RQ/SQ and VF stats */
mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
mbx.reset_stat.rx_stat_mask = 0x3FFF;
mbx.reset_stat.tx_stat_mask = 0x1F;
mbx.reset_stat.rq_stat_mask = 0xFFFF;
mbx.reset_stat.sq_stat_mask = 0xFFFF;
nicvf_send_msg_to_pf(nic, &mbx);
}
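
The widened masks ask the PF to clear everything in one message: 0x3FFF covers the 14 VF receive stat counters, 0x1F the 5 transmit counters, and the 0xFFFF RQ/SQ masks select all per-queue counters of the Qset (presumably 8 queues with 2 counters each).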
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
nicvf_send_msg_to_pf(nic, &mbx);
-nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
-if (!nic->sqs_mode)
+if (!nic->sqs_mode && (qidx == 0)) {
+/* Enable checking L3/L4 length and TCP/UDP checksums */
+nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+(BIT(24) | BIT(23) | BIT(21)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
+}
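
The value written is BIT(24) | BIT(23) | BIT(21) = 0x1A00000 (the BIT() spelling is the v1 review change from the cover letter). The added qidx == 0 guard makes the write happen once per Qset, which matches NIC_QSET_RQ_GEN_CFG being written at index 0 regardless of which queue is being configured.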
/* Enable Receive queue */
memset(&rq_cfg, 0, sizeof(struct rq_cfg));
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
-nic->drv_stats.tx_tso++;
+this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
}
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
-nic->drv_stats.tx_tso++;
+this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
return 1;
}
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
-struct nicvf_hw_stats *stats = &nic->hw_stats;
if (!cqe_rx->err_level && !cqe_rx->err_opcode)
return 0;
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
-stats->rx_bgx_truncated_pkts++;
+this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
break;
case CQ_RX_ERROP_RE_JABBER:
-stats->rx_jabber_errs++;
+this_cpu_inc(nic->drv_stats->rx_jabber_errs);
break;
case CQ_RX_ERROP_RE_FCS:
-stats->rx_fcs_errs++;
+this_cpu_inc(nic->drv_stats->rx_fcs_errs);
break;
case CQ_RX_ERROP_RE_RX_CTL:
-stats->rx_bgx_errs++;
+this_cpu_inc(nic->drv_stats->rx_bgx_errs);
break;
case CQ_RX_ERROP_PREL2_ERR:
-stats->rx_prel2_errs++;
+this_cpu_inc(nic->drv_stats->rx_prel2_errs);
break;
case CQ_RX_ERROP_L2_MAL:
-stats->rx_l2_hdr_malformed++;
+this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
break;
case CQ_RX_ERROP_L2_OVERSIZE:
-stats->rx_oversize++;
+this_cpu_inc(nic->drv_stats->rx_oversize);
break;
case CQ_RX_ERROP_L2_UNDERSIZE:
-stats->rx_undersize++;
+this_cpu_inc(nic->drv_stats->rx_undersize);
break;
case CQ_RX_ERROP_L2_LENMISM:
-stats->rx_l2_len_mismatch++;
+this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
break;
case CQ_RX_ERROP_L2_PCLP:
-stats->rx_l2_pclp++;
+this_cpu_inc(nic->drv_stats->rx_l2_pclp);
break;
case CQ_RX_ERROP_IP_NOT:
-stats->rx_ip_ver_errs++;
+this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
break;
case CQ_RX_ERROP_IP_CSUM_ERR:
-stats->rx_ip_csum_errs++;
+this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
break;
case CQ_RX_ERROP_IP_MAL:
-stats->rx_ip_hdr_malformed++;
+this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
break;
case CQ_RX_ERROP_IP_MALD:
-stats->rx_ip_payload_malformed++;
+this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
break;
case CQ_RX_ERROP_IP_HOP:
-stats->rx_ip_ttl_errs++;
+this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
break;
case CQ_RX_ERROP_L3_PCLP:
-stats->rx_l3_pclp++;
+this_cpu_inc(nic->drv_stats->rx_l3_pclp);
break;
case CQ_RX_ERROP_L4_MAL:
-stats->rx_l4_malformed++;
+this_cpu_inc(nic->drv_stats->rx_l4_malformed);
break;
case CQ_RX_ERROP_L4_CHK:
-stats->rx_l4_csum_errs++;
+this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
break;
case CQ_RX_ERROP_UDP_LEN:
-stats->rx_udp_len_errs++;
+this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
break;
case CQ_RX_ERROP_L4_PORT:
-stats->rx_l4_port_errs++;
+this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
break;
case CQ_RX_ERROP_TCP_FLAG:
-stats->rx_tcp_flag_errs++;
+this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
break;
case CQ_RX_ERROP_TCP_OFFSET:
-stats->rx_tcp_offset_errs++;
+this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
break;
case CQ_RX_ERROP_L4_PCLP:
-stats->rx_l4_pclp++;
+this_cpu_inc(nic->drv_stats->rx_l4_pclp);
break;
case CQ_RX_ERROP_RBDR_TRUNC:
-stats->rx_truncated_pkts++;
+this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
break;
}
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
}
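
this_cpu_inc() bumps the calling CPU's instance of the counter and is safe against preemption and interrupts on that CPU (on x86 it compiles down to a single percpu memory increment), so these hot paths need no spinlock or atomics; the cost moves to readers, which must sum all per-CPU instances as the ethtool path does. A pattern sketch (kernel context assumed, not a standalone program):

struct nicvf_drv_stats __percpu *stats;
u64 sum = 0;
int cpu;

stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);

this_cpu_inc(stats->rx_fcs_errs);	/* writer: per-CPU, lockless */

for_each_possible_cpu(cpu)		/* reader: fold across CPUs */
	sum += per_cpu_ptr(stats, cpu)->rx_fcs_errs;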
/* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
-struct cmp_queue_stats *stats = &cq->stats;
switch (cqe_tx->send_status) {
case CQ_TX_ERROP_GOOD:
-stats->tx.good++;
return 0;
case CQ_TX_ERROP_DESC_FAULT:
-stats->tx.desc_fault++;
+this_cpu_inc(nic->drv_stats->tx_desc_fault);
break;
case CQ_TX_ERROP_HDR_CONS_ERR:
-stats->tx.hdr_cons_err++;
+this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
break;
case CQ_TX_ERROP_SUBDC_ERR:
-stats->tx.subdesc_err++;
+this_cpu_inc(nic->drv_stats->tx_subdesc_err);
break;
+case CQ_TX_ERROP_MAX_SIZE_VIOL:
+this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
+break;
case CQ_TX_ERROP_IMM_SIZE_OFLOW:
-stats->tx.imm_size_oflow++;
+this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
break;
case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
-stats->tx.data_seq_err++;
+this_cpu_inc(nic->drv_stats->tx_data_seq_err);
break;
case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
-stats->tx.mem_seq_err++;
+this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
break;
case CQ_TX_ERROP_LOCK_VIOL:
-stats->tx.lock_viol++;
+this_cpu_inc(nic->drv_stats->tx_lock_viol);
break;
case CQ_TX_ERROP_DATA_FAULT:
-stats->tx.data_fault++;
+this_cpu_inc(nic->drv_stats->tx_data_fault);
break;
case CQ_TX_ERROP_TSTMP_CONFLICT:
-stats->tx.tstmp_conflict++;
+this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
break;
case CQ_TX_ERROP_TSTMP_TIMEOUT:
-stats->tx.tstmp_timeout++;
+this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
break;
case CQ_TX_ERROP_MEM_FAULT:
-stats->tx.mem_fault++;
+this_cpu_inc(nic->drv_stats->tx_mem_fault);
break;
case CQ_TX_ERROP_CK_OVERLAP:
-stats->tx.csum_overlap++;
+this_cpu_inc(nic->drv_stats->tx_csum_overlap);
break;
case CQ_TX_ERROP_CK_OFLOW:
-stats->tx.csum_overflow++;
+this_cpu_inc(nic->drv_stats->tx_csum_overflow);
break;
}
@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E {
CQ_TX_ERROP_DESC_FAULT = 0x10,
CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
CQ_TX_ERROP_SUBDC_ERR = 0x12,
+CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E {
CQ_TX_ERROP_ENUM_LAST = 0x8a,
};
-struct cmp_queue_stats {
-struct tx_stats {
-u64 good;
-u64 desc_fault;
-u64 hdr_cons_err;
-u64 subdesc_err;
-u64 imm_size_oflow;
-u64 data_seq_err;
-u64 mem_seq_err;
-u64 lock_viol;
-u64 data_fault;
-u64 tstmp_conflict;
-u64 tstmp_timeout;
-u64 mem_fault;
-u64 csum_overlap;
-u64 csum_overflow;
-} tx;
-} ____cacheline_aligned_in_smp;
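
struct cmp_queue_stats disappears entirely: its counters, previously updated from the completion-queue processing path, are replaced by the per-CPU nicvf_drv_stats fields incremented with this_cpu_inc() above, which is also why struct cmp_queue below loses its stats member.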
enum RQ_SQ_STATS {
RQ_SQ_STATS_OCTS,
RQ_SQ_STATS_PKTS,
@@ -241,7 +223,6 @@ struct cmp_queue {
spinlock_t lock; /* lock to serialize processing CQEs */
void *desc;
struct q_desc_mem dmem;
-struct cmp_queue_stats stats;
int irq;
} ____cacheline_aligned_in_smp;
@@ -336,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
-struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
-bgx->bgx_id =
-(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+bgx->bgx_id = (pci_resource_start(pdev,
+PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
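
With the old mask of 1 only a single BAR address bit distinguished BGX blocks; BGX_ID_MASK (0x3) allows up to four BGX blocks per node to be decoded, which lines up with the MAX_BGX_PER_NODE scaling on the following line.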
@@ -28,6 +28,8 @@
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
+#define BGX_ID_MASK 0x3
+#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
/* Registers */