Commit e2d92218 authored by David S. Miller

Merge branch 'hns3-next'

Peng Li says:

====================
add some new features and fix some bugs for HNS3 driver

This patchset adds support for some new features and fixes several bugs:
[Patch 1/20] adds support for enabling/disabling the VLAN filter with ethtool.
[Patch 2/20] prevents VFs from changing the rxvlan offload status.
[Patch 3/20 - 13/20] fix bugs in and refine the packet-statistics code, so the
counters can be queried with both ifconfig and ethtool.
[Patch 14/20 - 20/20] fix some other bugs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b60f699e 91f384f6
@@ -274,6 +274,8 @@ struct hnae3_ae_dev {
 * Get firmware version
 * get_mdix_mode()
 * Get media typr of phy
+ * enable_vlan_filter()
+ * Enable vlan filter
 * set_vlan_filter()
 * Set vlan filter config of Ports
 * set_vf_vlan_filter()
@@ -382,6 +384,7 @@ struct hnae3_ae_ops {
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
+ void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
...
@@ -247,6 +247,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
goto out_start_err;
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
return 0;
out_start_err:
@@ -286,6 +288,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
const struct hnae3_ae_ops *ops;
int i;
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -1101,6 +1106,11 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+ else
+ h->ae_algo->ops->enable_vlan_filter(h, false);
changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -1121,6 +1131,7 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *handle = priv->ae_handle;
struct hns3_enet_ring *ring;
unsigned int start;
unsigned int idx;
@@ -1128,6 +1139,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
u64 rx_bytes = 0;
u64 tx_pkts = 0;
u64 rx_pkts = 0;
+ u64 tx_drop = 0;
+ u64 rx_drop = 0;
+ if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+ handle->ae_algo->ops->update_stats(handle, &netdev->stats);
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
@@ -1136,6 +1154,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
+ tx_drop += ring->stats.tx_busy;
+ tx_drop += ring->stats.sw_err_cnt;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1144,6 +1164,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin_irq(&ring->syncp);
rx_bytes += ring->stats.rx_bytes;
rx_pkts += ring->stats.rx_pkts;
+ rx_drop += ring->stats.non_vld_descs;
+ rx_drop += ring->stats.err_pkt_len;
+ rx_drop += ring->stats.l2_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
@@ -1159,8 +1182,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
stats->tx_errors = netdev->stats.tx_errors;
- stats->rx_dropped = netdev->stats.rx_dropped;
+ stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
- stats->tx_dropped = netdev->stats.tx_dropped;
+ stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
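The per-ring counters above are read inside the kernel's u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() sequence-counter loop, so a 64-bit bytes/packets pair is never observed half-updated on 32-bit machines. Below is a minimal user-space sketch of the same retry protocol; the struct, field names and the bare "seq" counter are illustrative stand-ins (the real code relies on the kernel's u64_stats_sync helpers and their memory barriers).

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one ring's stats block plus its sequence counter. */
struct ring_stats_demo {
	unsigned int seq;      /* even = stable, odd = writer in progress */
	uint64_t tx_bytes;
	uint64_t tx_pkts;
};

/* Writer side: bump seq to odd, update, bump back to even. */
static void writer_update(struct ring_stats_demo *s, uint64_t bytes)
{
	s->seq++;              /* now odd: readers will retry */
	s->tx_bytes += bytes;
	s->tx_pkts++;
	s->seq++;              /* even again: snapshot is consistent */
}

/* Reader side: mirrors the do { ... } while (fetch_retry()) loop above. */
static void reader_snapshot(const struct ring_stats_demo *s,
			    uint64_t *bytes, uint64_t *pkts)
{
	unsigned int start;

	do {
		start = s->seq;          /* like u64_stats_fetch_begin_irq() */
		*bytes = s->tx_bytes;
		*pkts = s->tx_pkts;
		/* retry if the writer was active or ran in between */
	} while ((start & 1) || start != s->seq);
}

int main(void)
{
	struct ring_stats_demo s = { 0, 0, 0 };
	uint64_t bytes, pkts;

	writer_update(&s, 1500);
	writer_update(&s, 60);
	reader_snapshot(&s, &bytes, &pkts);
	printf("bytes=%llu pkts=%llu\n",
	       (unsigned long long)bytes, (unsigned long long)pkts);
	return 0;
}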
@@ -1390,6 +1413,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+ netdev->mtu = new_mtu;
/* if the netdev was running earlier, bring it up again */
if (if_running && hns3_nic_net_open(netdev))
ret = -EINVAL;
@@ -1549,6 +1574,8 @@ static struct pci_driver hns3_driver = {
/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1577,12 +1604,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ if (!(h->flags & HNAE3_SUPPORT_VF))
+ netdev->hw_features |=
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
...
@@ -15,26 +15,25 @@
struct hns3_stats {
char stats_string[ETH_GSTRING_LEN];
- int stats_size;
int stats_offset;
};
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) { \
.stats_string = _string, \
- .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \
- .stats_offset = offsetof(struct hns3_enet_ring, stats), \
- } \
+ .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+ offsetof(struct ring_stats, _member), \
+ }
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
+ HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
- HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("tx_pkts", tx_pkts),
+ HNS3_TQP_STAT("packets", tx_pkts),
- HNS3_TQP_STAT("tx_bytes", tx_bytes),
+ HNS3_TQP_STAT("bytes", tx_bytes),
- HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
+ HNS3_TQP_STAT("errors", tx_err_cnt),
- HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+ HNS3_TQP_STAT("tx_wake", restart_queue),
HNS3_TQP_STAT("tx_busy", tx_busy),
};
@@ -42,24 +41,59 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
+ HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
- HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
+ HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
- HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
- HNS3_TQP_STAT("rx_pkts", rx_pkts),
+ HNS3_TQP_STAT("packets", rx_pkts),
- HNS3_TQP_STAT("rx_bytes", rx_bytes),
+ HNS3_TQP_STAT("bytes", rx_bytes),
- HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
+ HNS3_TQP_STAT("errors", rx_err_cnt),
- HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
+ HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
- HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
+ HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
- HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
+ HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
- HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
+ HNS3_TQP_STAT("err_bd_num", err_bd_num),
- HNS3_TQP_STAT("rx_l2_err", l2_err),
+ HNS3_TQP_STAT("l2_err", l2_err),
- HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
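The reworked HNS3_TQP_STAT macro above stores a single byte offset per counter, offsetof(struct hns3_enet_ring, stats) + offsetof(struct ring_stats, _member), so the dump loop can read any counter as *(u64 *)((u8 *)ring + stats_offset) without a separate size field. A small stand-alone sketch of that table-driven technique follows; the structure and field names are simplified stand-ins for the driver's types, not the real definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats_demo {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

struct ring_demo {
	int index;
	struct ring_stats_demo stats;
};

struct stat_desc {
	const char *name;
	size_t offset;	/* byte offset of the u64 inside struct ring_demo */
};

#define RING_STAT(_name, _member) {				\
	.name = _name,						\
	.offset = offsetof(struct ring_demo, stats) +		\
		  offsetof(struct ring_stats_demo, _member),	\
}

static const struct stat_desc ring_stats_table[] = {
	RING_STAT("packets", packets),
	RING_STAT("bytes", bytes),
	RING_STAT("errors", errors),
};

int main(void)
{
	struct ring_demo ring = { .index = 0, .stats = { 12, 3456, 0 } };
	size_t i;

	for (i = 0; i < sizeof(ring_stats_table) / sizeof(ring_stats_table[0]); i++) {
		const uint8_t *base = (const uint8_t *)&ring;
		uint64_t val = *(const uint64_t *)(base + ring_stats_table[i].offset);

		printf("%s = %llu\n", ring_stats_table[i].name,
		       (unsigned long long)val);
	}
	return 0;
}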
+ /* netdev stats */
+ #define HNS3_NETDEV_STAT(_string, _member) { \
+ .stats_string = _string, \
+ .stats_offset = offsetof(struct rtnl_link_stats64, _member) \
+ }
+ static const struct hns3_stats hns3_netdev_stats[] = {
+ /* Rx per-queue statistics */
+ HNS3_NETDEV_STAT("rx_packets", rx_packets),
+ HNS3_NETDEV_STAT("tx_packets", tx_packets),
+ HNS3_NETDEV_STAT("rx_bytes", rx_bytes),
+ HNS3_NETDEV_STAT("tx_bytes", tx_bytes),
+ HNS3_NETDEV_STAT("rx_errors", rx_errors),
+ HNS3_NETDEV_STAT("tx_errors", tx_errors),
+ HNS3_NETDEV_STAT("rx_dropped", rx_dropped),
+ HNS3_NETDEV_STAT("tx_dropped", tx_dropped),
+ HNS3_NETDEV_STAT("multicast", multicast),
+ HNS3_NETDEV_STAT("collisions", collisions),
+ HNS3_NETDEV_STAT("rx_length_errors", rx_length_errors),
+ HNS3_NETDEV_STAT("rx_over_errors", rx_over_errors),
+ HNS3_NETDEV_STAT("rx_crc_errors", rx_crc_errors),
+ HNS3_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
+ HNS3_NETDEV_STAT("rx_fifo_errors", rx_fifo_errors),
+ HNS3_NETDEV_STAT("rx_missed_errors", rx_missed_errors),
+ HNS3_NETDEV_STAT("tx_aborted_errors", tx_aborted_errors),
+ HNS3_NETDEV_STAT("tx_carrier_errors", tx_carrier_errors),
+ HNS3_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+ HNS3_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
+ HNS3_NETDEV_STAT("tx_window_errors", tx_window_errors),
+ HNS3_NETDEV_STAT("rx_compressed", rx_compressed),
+ HNS3_NETDEV_STAT("tx_compressed", tx_compressed),
+ };
+ #define HNS3_NETDEV_STATS_COUNT ARRAY_SIZE(hns3_netdev_stats)
#define HNS3_SELF_TEST_TPYE_NUM 1
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
@@ -389,9 +423,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
- u32 stat_count, u32 num_tqps)
+ u32 stat_count, u32 num_tqps, const char *prefix)
{
- #define MAX_PREFIX_SIZE (8 + 4)
+ #define MAX_PREFIX_SIZE (6 + 4)
u32 size_left;
u32 i, j;
u32 n1;
@@ -401,7 +435,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+ n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+ prefix, i);
n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -417,14 +452,37 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ const char tx_prefix[] = "txq";
+ const char rx_prefix[] = "rxq";
/* get strings for Tx */
data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, tx_prefix);
/* get strings for Rx */
data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
- kinfo->num_tqps);
+ kinfo->num_tqps, rx_prefix);
+ return data;
+ }
+ static u8 *hns3_netdev_stats_get_strings(u8 *data)
+ {
+ int i;
+ /* get strings for netdev */
+ for (i = 0; i < HNS3_NETDEV_STATS_COUNT; i++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ hns3_netdev_stats[i].stats_string);
+ data += ETH_GSTRING_LEN;
+ }
+ snprintf(data, ETH_GSTRING_LEN, "netdev_rx_dropped");
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "netdev_tx_dropped");
+ data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "netdev_tx_timeout");
+ data += ETH_GSTRING_LEN;
return data;
}
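With the new prefix argument, each per-queue ethtool string becomes "<prefix>#<queue>_<stat>", e.g. "txq#3_bytes", and MAX_PREFIX_SIZE shrinks from (8 + 4) to (6 + 4): "txq"/"rxq" plus '#', '_' and the terminator need at most 6 bytes, leaving 4 for queue-index digits. A hedged sketch of that string assembly inside a fixed 32-byte slot (the same size as ETH_GSTRING_LEN), using plain snprintf rather than the driver's helper; names and sizes here are assumptions for illustration.

#include <stdio.h>

#define GSTRING_LEN	32		/* same size as ETH_GSTRING_LEN */
#define MAX_PREFIX_SIZE	(6 + 4)		/* "txq"/"rxq" + '#' + '_' + NUL, plus 4 digits */

/* Build "<prefix>#<queue>_<stat>" into a fixed-size slot, truncating safely. */
static void build_stat_name(char *slot, const char *prefix, int queue,
			    const char *stat)
{
	int n;

	n = snprintf(slot, MAX_PREFIX_SIZE, "%s#%d_", prefix, queue);
	if (n >= MAX_PREFIX_SIZE)
		n = MAX_PREFIX_SIZE - 1;
	snprintf(slot + n, GSTRING_LEN - n, "%s", stat);
}

int main(void)
{
	char slot[GSTRING_LEN];

	build_stat_name(slot, "txq", 3, "bytes");
	printf("%s\n", slot);			/* txq#3_bytes */
	build_stat_name(slot, "rxq", 15, "l3l4_csum_err");
	printf("%s\n", slot);			/* rxq#15_l3l4_csum_err */
	return 0;
}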
@@ -440,6 +498,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
+ buff = hns3_netdev_stats_get_strings(buff);
buff = hns3_get_strings_tqps(h, buff);
h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff);
break;
@@ -455,13 +514,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hns3_enet_ring *ring;
u8 *stat;
- u32 i;
+ int i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i].ring;
- for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -469,8 +528,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
/* get stats for Rx */
for (i = 0; i < kinfo->num_tqps; i++) {
ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
- for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
- stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
+ for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
*data++ = *(u64 *)stat;
}
}
@@ -478,6 +537,27 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
return data;
}
+ static u64 *hns3_get_netdev_stats(struct net_device *netdev, u64 *data)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ const struct rtnl_link_stats64 *net_stats;
+ struct rtnl_link_stats64 temp;
+ u8 *stat;
+ int i;
+ net_stats = dev_get_stats(netdev, &temp);
+ for (i = 0; i < HNS3_NETDEV_STATS_COUNT; i++) {
+ stat = (u8 *)net_stats + hns3_netdev_stats[i].stats_offset;
+ *data++ = *(u64 *)stat;
+ }
+ *data++ = netdev->rx_dropped.counter;
+ *data++ = netdev->tx_dropped.counter;
+ *data++ = priv->tx_timeout_count;
+ return data;
+ }
/* hns3_get_stats - get detail statistics.
 * @netdev: net device
 * @stats: statistics info.
@@ -494,7 +574,7 @@ static void hns3_get_stats(struct net_device *netdev,
return;
}
- h->ae_algo->ops->update_stats(h, &netdev->stats);
+ p = hns3_get_netdev_stats(netdev, p);
/* get per-queue stats */
p = hns3_get_stats_tqps(h, p);
...
@@ -556,8 +556,6 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
- #define HCLGE_MAC_MIN_MTU 64
- #define HCLGE_MAC_MAX_MTU 9728
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
...
@@ -17,6 +17,7 @@
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+ #include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -35,6 +36,7 @@
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable);
+ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
@@ -279,8 +281,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
{"mac_tx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
+ {"mac_tx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
{"mac_tx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
{"mac_tx_65_127_oct_pkt_num",
@@ -293,8 +295,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
{"mac_tx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
+ {"mac_tx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+ {"mac_tx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+ {"mac_tx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+ {"mac_tx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
+ {"mac_tx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+ {"mac_tx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+ {"mac_tx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+ {"mac_tx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+ {"mac_tx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
{"mac_rx_total_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
{"mac_rx_total_oct_num",
@@ -315,8 +333,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
{"mac_rx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_overrsize_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
+ {"mac_rx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
{"mac_rx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
{"mac_rx_65_127_oct_pkt_num",
@@ -329,33 +347,49 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
{"mac_rx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_max_oct_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
- {"mac_trans_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
- {"mac_trans_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
- {"mac_trans_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
- {"mac_trans_err_all_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
- {"mac_trans_from_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
- {"mac_trans_from_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
- {"mac_rcv_fragment_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
- {"mac_rcv_undermin_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
- {"mac_rcv_jabber_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
- {"mac_rcv_fcs_err_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
- {"mac_rcv_send_app_good_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
- {"mac_rcv_send_app_bad_pkt_num",
- HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
+ {"mac_rx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+ {"mac_rx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+ {"mac_rx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+ {"mac_rx_8192_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
+ {"mac_rx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+ {"mac_rx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+ {"mac_rx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+ {"mac_rx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+ {"mac_rx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+ {"mac_tx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+ {"mac_tx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+ {"mac_tx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+ {"mac_tx_err_all_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+ {"mac_tx_from_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+ {"mac_tx_from_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+ {"mac_rx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+ {"mac_rx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+ {"mac_rx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+ {"mac_rx_fcs_err_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+ {"mac_rx_send_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+ {"mac_rx_send_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -463,7 +497,7 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
- #define HCLGE_MAC_CMD_NUM 17
+ #define HCLGE_MAC_CMD_NUM 21
#define HCLGE_RTN_DATA_NUM 4
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -525,7 +559,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -545,7 +579,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[1]);
}
return 0;
@@ -587,7 +621,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -595,7 +629,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -643,23 +677,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
- net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
- net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
- net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
- net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+ net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
net_stats->rx_length_errors =
hw_stats->mac_stats.mac_rx_undersize_pkt_num;
net_stats->rx_length_errors +=
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
net_stats->rx_over_errors =
- hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+ hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -699,6 +732,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
int status;
+ if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return;
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -724,6 +760,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
status);
hclge_update_netstat(hw_stats, net_stats);
+ clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -2203,8 +2241,11 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
static int hclge_mac_init(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ int mtu;
int ret;
ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2238,11 +2279,25 @@ static int hclge_mac_init(struct hclge_dev *hdev)
}
ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"set default mac_vlan_mask fail ret=%d\n", ret);
+ return ret;
+ }
+ if (netdev)
+ mtu = netdev->mtu;
+ else
+ mtu = ETH_DATA_LEN;
+ ret = hclge_set_mtu(handle, mtu);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set mtu failed ret=%d\n", ret);
return ret;
+ }
+ return 0;
}
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
@@ -2381,6 +2436,7 @@ static void hclge_service_timer(struct timer_list *t)
struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
mod_timer(&hdev->service_timer, jiffies + HZ);
+ hdev->hw_stats.stats_timer++;
hclge_task_schedule(hdev);
}
@@ -2780,9 +2836,13 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
+ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
+ hclge_update_stats_for_all(hdev);
+ hdev->hw_stats.stats_timer = 0;
+ }
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
- hclge_update_stats_for_all(hdev);
hclge_service_complete(hdev);
}
@@ -4197,6 +4257,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
/* mac addr check */
if (is_zero_ether_addr(new_addr) ||
@@ -4208,14 +4269,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
return -EINVAL;
}
- hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
- if (!hclge_add_uc_addr(handle, new_addr)) {
- ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
- return 0;
- }
- return -EIO;
+ ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "remove old uc mac address fail, ret =%d.\n",
+ ret);
+ ret = hclge_add_uc_addr(handle, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add uc mac address fail, ret =%d.\n",
+ ret);
+ ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore uc mac address fail, ret =%d.\n",
+ ret);
+ }
+ return -EIO;
+ }
+ ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "configure mac pause address fail, ret =%d.\n",
+ ret);
+ return -EIO;
+ }
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+ return 0;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4241,6 +4327,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
return 0;
}
+ #define HCLGE_FILTER_TYPE_VF 0
+ #define HCLGE_FILTER_TYPE_PORT 1
+ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+ {
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+ }
int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
@@ -4469,8 +4566,6 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
- #define HCLGE_FILTER_TYPE_VF 0
- #define HCLGE_FILTER_TYPE_PORT 1
#define HCLGE_DEF_VLAN_TYPE 0x8100
struct hnae3_handle *handle;
@@ -4542,16 +4637,21 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
+ int max_frm_size;
int ret;
- if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
return -EINVAL;
- hdev->mps = new_mtu;
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(new_mtu);
+ req->max_frm_size = cpu_to_le16(max_frm_size);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -4559,6 +4659,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
return ret;
}
+ hdev->mps = max_frm_size;
return 0;
}
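hclge_set_mtu() now converts the requested MTU into a MAC frame length before range-checking it: max_frm_size = MTU + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4), bounded by HCLGE_MAC_MIN_FRAME/HCLGE_MAC_MAX_FRAME (64/9728) and raised to at least HCLGE_MAC_DEFAULT_FRAME (1522 for a 1500-byte MTU). A small sketch of the same arithmetic, with the standard Ethernet header sizes written out as assumptions:

#include <stdio.h>

#define ETH_HLEN		14
#define ETH_FCS_LEN		4
#define VLAN_HLEN		4
#define ETH_DATA_LEN		1500

#define MAC_DEFAULT_FRAME	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN) /* 1522 */
#define MAC_MIN_FRAME		64
#define MAC_MAX_FRAME		9728

/* Returns the frame size that would be programmed for a given MTU, or -1 if out of range. */
static int mtu_to_frame_size(int mtu)
{
	int max_frm_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if (max_frm_size < MAC_MIN_FRAME || max_frm_size > MAC_MAX_FRAME)
		return -1;

	if (max_frm_size < MAC_DEFAULT_FRAME)
		max_frm_size = MAC_DEFAULT_FRAME;

	return max_frm_size;
}

int main(void)
{
	printf("MTU 1500 -> %d\n", mtu_to_frame_size(1500));	/* 1522 */
	printf("MTU 9000 -> %d\n", mtu_to_frame_size(9000));	/* 9022 */
	printf("MTU 9707 -> %d\n", mtu_to_frame_size(9707));	/* -1: 9729 exceeds 9728 */
	return 0;
}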
@@ -4689,22 +4791,19 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
- enum hclge_fc_mode fc_mode;
int ret;
if (rx_en && tx_en)
- fc_mode = HCLGE_FC_FULL;
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
else if (rx_en && !tx_en)
- fc_mode = HCLGE_FC_RX_PAUSE;
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
else if (!rx_en && tx_en)
- fc_mode = HCLGE_FC_TX_PAUSE;
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
else
- fc_mode = HCLGE_FC_NONE;
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
- if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
- hdev->fc_mode_last_time = fc_mode;
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
return 0;
- }
ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
if (ret) {
@@ -4713,7 +4812,7 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
return ret;
}
- hdev->tm_info.fc_mode = fc_mode;
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
return 0;
}
@@ -5482,6 +5581,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_sset_count = hclge_get_sset_count,
.get_fw_version = hclge_get_fw_version,
.get_mdix_mode = hclge_get_mdix_mode,
+ .enable_vlan_filter = hclge_enable_vlan_filter,
.set_vlan_filter = hclge_set_port_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
...
@@ -101,6 +101,11 @@
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+ #define HCLGE_MAC_DEFAULT_FRAME \
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+ #define HCLGE_MAC_MIN_FRAME 64
+ #define HCLGE_MAC_MAX_FRAME 9728
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
HCLGE_STATE_DOWN,
@@ -112,6 +117,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_RST_HANDLING,
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
+ HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_MAX
};
@@ -381,14 +387,23 @@ struct hclge_mac_stats {
u64 mac_tx_multi_pkt_num;
u64 mac_tx_broad_pkt_num;
u64 mac_tx_undersize_pkt_num;
- u64 mac_tx_overrsize_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
u64 mac_tx_64_oct_pkt_num;
u64 mac_tx_65_127_oct_pkt_num;
u64 mac_tx_128_255_oct_pkt_num;
u64 mac_tx_256_511_oct_pkt_num;
u64 mac_tx_512_1023_oct_pkt_num;
u64 mac_tx_1024_1518_oct_pkt_num;
- u64 mac_tx_1519_max_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+ u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_good_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_oct_pkt_num;
u64 mac_rx_total_pkt_num;
u64 mac_rx_total_oct_num;
u64 mac_rx_good_pkt_num;
@@ -399,33 +414,43 @@ struct hclge_mac_stats {
u64 mac_rx_multi_pkt_num;
u64 mac_rx_broad_pkt_num;
u64 mac_rx_undersize_pkt_num;
- u64 mac_rx_overrsize_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
u64 mac_rx_64_oct_pkt_num;
u64 mac_rx_65_127_oct_pkt_num;
u64 mac_rx_128_255_oct_pkt_num;
u64 mac_rx_256_511_oct_pkt_num;
u64 mac_rx_512_1023_oct_pkt_num;
u64 mac_rx_1024_1518_oct_pkt_num;
- u64 mac_rx_1519_max_oct_pkt_num;
- u64 mac_trans_fragment_pkt_num;
- u64 mac_trans_undermin_pkt_num;
- u64 mac_trans_jabber_pkt_num;
- u64 mac_trans_err_all_pkt_num;
- u64 mac_trans_from_app_good_pkt_num;
- u64 mac_trans_from_app_bad_pkt_num;
- u64 mac_rcv_fragment_pkt_num;
- u64 mac_rcv_undermin_pkt_num;
- u64 mac_rcv_jabber_pkt_num;
- u64 mac_rcv_fcs_err_pkt_num;
- u64 mac_rcv_send_app_good_pkt_num;
- u64 mac_rcv_send_app_bad_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */
+ u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_good_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_oct_pkt_num;
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
};
+ #define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
struct hclge_hw_stats {
struct hclge_mac_stats mac_stats;
struct hclge_64_bit_stats all_64_bit_stats;
struct hclge_32_bit_stats all_32_bit_stats;
+ u32 stats_timer;
};
struct hclge_vlan_type_cfg {
...
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+ static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+ u8 pause_trans_gap, u16 pause_trans_time)
+ {
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+ ether_addr_copy(pause_param->mac_addr, addr);
+ pause_param->pause_trans_gap = pause_trans_gap;
+ pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+ }
+ int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+ {
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ u16 trans_time;
+ u8 trans_gap;
+ int ret;
+ pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+ trans_gap = pause_param->pause_trans_gap;
+ trans_time = le16_to_cpu(pause_param->pause_trans_time);
+ return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
+ trans_time);
+ }
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
return hclge_tm_schd_mode_hw(hdev);
}
+ static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
+ {
+ struct hclge_mac *mac = &hdev->hw.mac;
+ return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+ }
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
int ret;
u8 i;
- if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
- return hclge_mac_pause_setup_hw(hdev);
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+ return hclge_mac_pause_param_setup_hw(hdev);
+ }
/* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
...
@@ -18,6 +18,9 @@
#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
+ #define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF
+ #define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
u8 pri_en_bitmap;
};
+ struct hclge_cfg_pause_param_cmd {
+ u8 mac_addr[ETH_ALEN];
+ u8 pause_trans_gap;
+ u8 rsvd;
+ __le16 pause_trans_time;
+ };
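The new hclge_cfg_pause_param_cmd packs the pause-frame source MAC, the transmit gap, a reserved byte and the little-endian transmit time into the first bytes of the firmware command's data area. A quick stand-alone size check of that layout; the mirror struct below is an assumption for illustration, not the driver's definition:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Mirror of the command payload above, packed as it would go on the wire. */
struct pause_param_payload {
	uint8_t  mac_addr[ETH_ALEN];
	uint8_t  pause_trans_gap;
	uint8_t  rsvd;
	uint16_t pause_trans_time;	/* __le16 in the real command */
} __attribute__((packed));

int main(void)
{
	/* 6 + 1 + 1 + 2 = 10 bytes, comfortably inside the descriptor's data words */
	assert(sizeof(struct pause_param_payload) == 10);
	printf("payload size = %zu bytes\n", sizeof(struct pause_param_payload));
	return 0;
}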
struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
@@ -119,4 +129,5 @@ int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+ int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
#endif
@@ -49,7 +49,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
return status;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[4]);
+ le32_to_cpu(desc.data[1]);
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
true);
@@ -63,7 +63,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
return status;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[4]);
+ le32_to_cpu(desc.data[1]);
}
return 0;
@@ -105,7 +105,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < hdev->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -113,7 +113,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < hdev->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -1288,7 +1288,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
hw->hdev = hdev;
- hw->io_base = pci_iomap(pdev, 2, 0);;
+ hw->io_base = pci_iomap(pdev, 2, 0);
if (!hw->io_base) {
dev_err(&pdev->dev, "can't map configuration register space\n");
ret = -ENOMEM;
...