Commit 2bd7c3e1 authored by David S. Miller

Merge branch 'net-hns3-add-some-optimizations-and-cleanups'

Huazhong Tan says:

====================
net: hns3: add some optimizations and cleanups

This series adds some code optimizations and cleanups for
the HNS3 ethernet driver.

[patch 1/9] dumps some debug information when reset fails.

[patch 2/9] dumps some struct netdev_queue information when
TX times out.

[patch 3/9] cleans up some magic numbers.

[patch 4/9] cleans up some coding style issues.

[patch 5/9] fixes a compiler warning.

[patch 6/9] optimizes some local variable initialization.

[patch 7/9] modifies some comments.

[patch 8/9] cleans up some print format warnings.

[patch 9/9] cleans up a byte order issue.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 79697744 39edaf24
...@@ -47,7 +47,7 @@ enum HCLGE_MBX_OPCODE { ...@@ -47,7 +47,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */ HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */
}; };
...@@ -72,7 +72,7 @@ enum hclge_mbx_vlan_cfg_subcode { ...@@ -72,7 +72,7 @@ enum hclge_mbx_vlan_cfg_subcode {
}; };
#define HCLGE_MBX_MAX_MSG_SIZE 16 #define HCLGE_MBX_MAX_MSG_SIZE 16
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
......
...@@ -146,7 +146,7 @@ void hnae3_unregister_client(struct hnae3_client *client) ...@@ -146,7 +146,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
return; return;
mutex_lock(&hnae3_common_lock); mutex_lock(&hnae3_common_lock);
/* one system should only have one client for every type */
list_for_each_entry(client_tmp, &hnae3_client_list, node) { list_for_each_entry(client_tmp, &hnae3_client_list, node) {
if (client_tmp->type == client->type) { if (client_tmp->type == client->type) {
existed = true; existed = true;
......
...@@ -130,7 +130,6 @@ enum hnae3_module_type { ...@@ -130,7 +130,6 @@ enum hnae3_module_type {
HNAE3_MODULE_TYPE_CR = 0x04, HNAE3_MODULE_TYPE_CR = 0x04,
HNAE3_MODULE_TYPE_KR = 0x05, HNAE3_MODULE_TYPE_KR = 0x05,
HNAE3_MODULE_TYPE_TP = 0x06, HNAE3_MODULE_TYPE_TP = 0x06,
}; };
enum hnae3_fec_mode { enum hnae3_fec_mode {
...@@ -576,7 +575,8 @@ struct hnae3_ae_algo { ...@@ -576,7 +575,8 @@ struct hnae3_ae_algo {
const struct pci_device_id *pdev_id_table; const struct pci_device_id *pdev_id_table;
}; };
#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16) #define HNAE3_INT_NAME_EXT_LEN 32 /* Max extra information length */
#define HNAE3_INT_NAME_LEN (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN)
#define HNAE3_ITR_COUNTDOWN_START 100 #define HNAE3_ITR_COUNTDOWN_START 100
struct hnae3_tc_info { struct hnae3_tc_info {
......
...@@ -57,68 +57,68 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h, ...@@ -57,68 +57,68 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
HNS3_RING_RX_RING_BASEADDR_H_REG); HNS3_RING_RX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base + base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_L_REG); HNS3_RING_RX_RING_BASEADDR_L_REG);
dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i, dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l); base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG); HNS3_RING_RX_RING_BD_NUM_REG);
dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value); dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG); HNS3_RING_RX_RING_BD_LEN_REG);
dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value); dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG); HNS3_RING_RX_RING_TAIL_REG);
dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value); dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG); HNS3_RING_RX_RING_HEAD_REG);
dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value); dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG); HNS3_RING_RX_RING_FBDNUM_REG);
dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value); dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG); HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value); dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value);
ring = &priv->ring[i]; ring = &priv->ring[i];
base_add_h = readl_relaxed(ring->tqp->io_base + base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG); HNS3_RING_TX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base + base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_L_REG); HNS3_RING_TX_RING_BASEADDR_L_REG);
dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i, dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l); base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG); HNS3_RING_TX_RING_BD_NUM_REG);
dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value); dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG); HNS3_RING_TX_RING_TC_REG);
dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value); dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG); HNS3_RING_TX_RING_TAIL_REG);
dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value); dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG); HNS3_RING_TX_RING_HEAD_REG);
dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value); dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG); HNS3_RING_TX_RING_FBDNUM_REG);
dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value); dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG); HNS3_RING_TX_RING_OFFSET_REG);
dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value); dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base + value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG); HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i, dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n\n", i,
value); value);
} }
...@@ -190,21 +190,24 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) ...@@ -190,21 +190,24 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
addr = le64_to_cpu(tx_desc->addr); addr = le64_to_cpu(tx_desc->addr);
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index); dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
dev_info(dev, "(TX)addr: %pad\n", &addr); dev_info(dev, "(TX)addr: %pad\n", &addr);
dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag); dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size); dev_info(dev, "(TX)send_size: %u\n",
le16_to_cpu(tx_desc->tx.send_size));
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso); dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len); dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len); dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len); dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag); dev_info(dev, "(TX)vlan_tag: %u\n",
dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv); le16_to_cpu(tx_desc->tx.outer_vlan_tag));
dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec); dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len); dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len); dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len); dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen); dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri); dev_info(dev, "(TX)vld_ra_ri: %u\n",
dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss); le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
ring = &priv->ring[q_num + h->kinfo.num_tqps]; ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
...@@ -214,15 +217,19 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) ...@@ -214,15 +217,19 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
addr = le64_to_cpu(rx_desc->addr); addr = le64_to_cpu(rx_desc->addr);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index); dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: %pad\n", &addr); dev_info(dev, "(RX)addr: %pad\n", &addr);
dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info); dev_info(dev, "(RX)l234_info: %u\n",
dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len); le32_to_cpu(rx_desc->rx.l234_info));
dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size); dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash); dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id); dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag); dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id));
dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb); dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag));
dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag); dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n",
dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info); le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb));
dev_info(dev, "(RX)ot_vlan_tag: %u\n",
le16_to_cpu(rx_desc->rx.ot_vlan_tag));
dev_info(dev, "(RX)bd_base_info: %u\n",
le32_to_cpu(rx_desc->rx.bd_base_info));
return 0; return 0;
} }
......
...@@ -1710,8 +1710,8 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, ...@@ -1710,8 +1710,8 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
int ret = -EIO; int ret = -EIO;
netif_dbg(h, drv, netdev, netif_dbg(h, drv, netdev,
"set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n", "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
vf, vlan, qos, vlan_proto); vf, vlan, qos, ntohs(vlan_proto));
if (h->ae_algo->ops->set_vf_vlan_filter) if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
...@@ -1771,7 +1771,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) ...@@ -1771,7 +1771,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{ {
struct hns3_nic_priv *priv = netdev_priv(ndev); struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring = NULL; struct hns3_enet_ring *tx_ring;
struct napi_struct *napi; struct napi_struct *napi;
int timeout_queue = 0; int timeout_queue = 0;
int hw_head, hw_tail; int hw_head, hw_tail;
...@@ -1792,6 +1792,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) ...@@ -1792,6 +1792,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
time_after(jiffies, time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) { (trans_start + ndev->watchdog_timeo))) {
timeout_queue = i; timeout_queue = i;
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
q->state,
jiffies_to_msecs(jiffies - trans_start));
break; break;
} }
} }
...@@ -1999,7 +2002,7 @@ bool hns3_is_phys_func(struct pci_dev *pdev) ...@@ -1999,7 +2002,7 @@ bool hns3_is_phys_func(struct pci_dev *pdev)
case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
return false; return false;
default: default:
dev_warn(&pdev->dev, "un-recognized pci device-id %d", dev_warn(&pdev->dev, "un-recognized pci device-id %u",
dev_id); dev_id);
} }
...@@ -3936,14 +3939,14 @@ static void hns3_info_show(struct hns3_nic_priv *priv) ...@@ -3936,14 +3939,14 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps); dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size); dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size); dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len); dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc); dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc); dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc); dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu); dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
} }
static int hns3_client_init(struct hnae3_handle *handle) static int hns3_client_init(struct hnae3_handle *handle)
...@@ -4563,7 +4566,7 @@ int hns3_set_channels(struct net_device *netdev, ...@@ -4563,7 +4566,7 @@ int hns3_set_channels(struct net_device *netdev,
if (new_tqp_num > hns3_get_max_available_channels(h) || if (new_tqp_num > hns3_get_max_available_channels(h) ||
new_tqp_num < 1) { new_tqp_num < 1) {
dev_err(&netdev->dev, dev_err(&netdev->dev,
"Change tqps fail, the tqp range is from 1 to %d", "Change tqps fail, the tqp range is from 1 to %u",
hns3_get_max_available_channels(h)); hns3_get_max_available_channels(h));
return -EINVAL; return -EINVAL;
} }
......
...@@ -985,7 +985,7 @@ static int hns3_set_ringparam(struct net_device *ndev, ...@@ -985,7 +985,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
} }
netdev_info(ndev, netdev_info(ndev,
"Changing Tx/Rx ring depth from %d/%d to %d/%d\n", "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
old_tx_desc_num, old_rx_desc_num, old_tx_desc_num, old_rx_desc_num,
new_tx_desc_num, new_rx_desc_num); new_tx_desc_num, new_rx_desc_num);
...@@ -1097,7 +1097,7 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, ...@@ -1097,7 +1097,7 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
if (queue >= queue_num) { if (queue >= queue_num) {
netdev_err(netdev, netdev_err(netdev,
"Invalid queue value %d! Queue max id=%d\n", "Invalid queue value %u! Queue max id=%u\n",
queue, queue_num - 1); queue, queue_num - 1);
return -EINVAL; return -EINVAL;
} }
...@@ -1147,14 +1147,14 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev, ...@@ -1147,14 +1147,14 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
if (rx_gl != cmd->rx_coalesce_usecs) { if (rx_gl != cmd->rx_coalesce_usecs) {
netdev_info(netdev, netdev_info(netdev,
"rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
cmd->rx_coalesce_usecs, rx_gl); cmd->rx_coalesce_usecs, rx_gl);
} }
tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs); tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
if (tx_gl != cmd->tx_coalesce_usecs) { if (tx_gl != cmd->tx_coalesce_usecs) {
netdev_info(netdev, netdev_info(netdev,
"tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
cmd->tx_coalesce_usecs, tx_gl); cmd->tx_coalesce_usecs, tx_gl);
} }
...@@ -1182,7 +1182,7 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev, ...@@ -1182,7 +1182,7 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev,
rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high); rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
if (rl != cmd->rx_coalesce_usecs_high) { if (rl != cmd->rx_coalesce_usecs_high) {
netdev_info(netdev, netdev_info(netdev,
"usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n", "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
cmd->rx_coalesce_usecs_high, rl); cmd->rx_coalesce_usecs_high, rl);
} }
...@@ -1211,7 +1211,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev, ...@@ -1211,7 +1211,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
if (cmd->use_adaptive_tx_coalesce == 1 || if (cmd->use_adaptive_tx_coalesce == 1 ||
cmd->use_adaptive_rx_coalesce == 1) { cmd->use_adaptive_rx_coalesce == 1) {
netdev_info(netdev, netdev_info(netdev,
"adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n", "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
cmd->use_adaptive_tx_coalesce, cmd->use_adaptive_tx_coalesce,
cmd->use_adaptive_rx_coalesce); cmd->use_adaptive_rx_coalesce);
} }
......
...@@ -145,7 +145,7 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw) ...@@ -145,7 +145,7 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
rmb(); /* Make sure head is ready before touch any data */ rmb(); /* Make sure head is ready before touch any data */
if (!is_valid_csq_clean_head(csq, head)) { if (!is_valid_csq_clean_head(csq, head)) {
dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean); csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n"); "Disabling any further commands to IMP firmware\n");
...@@ -314,11 +314,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) ...@@ -314,11 +314,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
} while (timeout < hw->cmq.tx_timeout); } while (timeout < hw->cmq.tx_timeout);
} }
if (!complete) { if (!complete)
retval = -EBADE; retval = -EBADE;
} else { else
retval = hclge_cmd_check_retval(hw, desc, num, ntc); retval = hclge_cmd_check_retval(hw, desc, num, ntc);
}
/* Clean the command send queue */ /* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw); handle = hclge_cmd_csq_clean(hw);
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#define HCLGE_CMDQ_TX_TIMEOUT 30000 #define HCLGE_CMDQ_TX_TIMEOUT 30000
#define HCLGE_DESC_DATA_LEN 6
struct hclge_dev; struct hclge_dev;
struct hclge_desc { struct hclge_desc {
...@@ -19,7 +20,7 @@ struct hclge_desc { ...@@ -19,7 +20,7 @@ struct hclge_desc {
__le16 flag; __le16 flag;
__le16 retval; __le16 retval;
__le16 rsv; __le16 rsv;
__le32 data[6]; __le32 data[HCLGE_DESC_DATA_LEN];
}; };
struct hclge_cmq_ring { struct hclge_cmq_ring {
...@@ -260,6 +261,7 @@ enum hclge_opcode_type { ...@@ -260,6 +261,7 @@ enum hclge_opcode_type {
/* NCL config command */ /* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
/* M7 stats command */ /* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012, HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013, HCLGE_OPC_M7_STATS_INFO = 0x7013,
...@@ -429,8 +431,10 @@ struct hclge_rx_pkt_buf_cmd { ...@@ -429,8 +431,10 @@ struct hclge_rx_pkt_buf_cmd {
#define HCLGE_PF_MAC_NUM_MASK 0x3 #define HCLGE_PF_MAC_NUM_MASK 0x3
#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B) #define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) #define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
#define HCLGE_VF_RST_STATUS_CMD 4
struct hclge_func_status_cmd { struct hclge_func_status_cmd {
__le32 vf_rst_state[4]; __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
u8 pf_state; u8 pf_state;
u8 mac_id; u8 mac_id;
u8 rsv1; u8 rsv1;
...@@ -486,10 +490,12 @@ struct hclge_pf_res_cmd { ...@@ -486,10 +490,12 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_UMV_TBL_SPACE_S 16 #define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) #define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
#define HCLGE_CFG_CMD_CNT 4
struct hclge_cfg_param_cmd { struct hclge_cfg_param_cmd {
__le32 offset; __le32 offset;
__le32 rsv; __le32 rsv;
__le32 param[4]; __le32 param[HCLGE_CFG_CMD_CNT];
}; };
#define HCLGE_MAC_MODE 0x0 #define HCLGE_MAC_MODE 0x0
...@@ -758,20 +764,27 @@ struct hclge_vlan_filter_ctrl_cmd { ...@@ -758,20 +764,27 @@ struct hclge_vlan_filter_ctrl_cmd {
u8 rsv2[19]; u8 rsv2[19];
}; };
#define HCLGE_VLAN_ID_OFFSET_STEP 160
#define HCLGE_VLAN_BYTE_SIZE 8
#define HCLGE_VLAN_OFFSET_BITMAP \
(HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE)
struct hclge_vlan_filter_pf_cfg_cmd { struct hclge_vlan_filter_pf_cfg_cmd {
u8 vlan_offset; u8 vlan_offset;
u8 vlan_cfg; u8 vlan_cfg;
u8 rsv[2]; u8 rsv[2];
u8 vlan_offset_bitmap[20]; u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
}; };
#define HCLGE_MAX_VF_BYTES 16
struct hclge_vlan_filter_vf_cfg_cmd { struct hclge_vlan_filter_vf_cfg_cmd {
__le16 vlan_id; __le16 vlan_id;
u8 resp_code; u8 resp_code;
u8 rsv; u8 rsv;
u8 vlan_cfg; u8 vlan_cfg;
u8 rsv1[3]; u8 rsv1[3];
u8 vf_bitmap[16]; u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
}; };
#define HCLGE_SWITCH_ANTI_SPOOF_B 0U #define HCLGE_SWITCH_ANTI_SPOOF_B 0U
...@@ -806,6 +819,7 @@ enum hclge_mac_vlan_cfg_sel { ...@@ -806,6 +819,7 @@ enum hclge_mac_vlan_cfg_sel {
#define HCLGE_CFG_NIC_ROCE_SEL_B 4 #define HCLGE_CFG_NIC_ROCE_SEL_B 4
#define HCLGE_ACCEPT_TAG2_B 5 #define HCLGE_ACCEPT_TAG2_B 5
#define HCLGE_ACCEPT_UNTAG2_B 6 #define HCLGE_ACCEPT_UNTAG2_B 6
#define HCLGE_VF_NUM_PER_BYTE 8
struct hclge_vport_vtag_tx_cfg_cmd { struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg; u8 vport_vlan_cfg;
...@@ -813,7 +827,7 @@ struct hclge_vport_vtag_tx_cfg_cmd { ...@@ -813,7 +827,7 @@ struct hclge_vport_vtag_tx_cfg_cmd {
u8 rsv1[2]; u8 rsv1[2];
__le16 def_vlan_tag1; __le16 def_vlan_tag1;
__le16 def_vlan_tag2; __le16 def_vlan_tag2;
u8 vf_bitmap[8]; u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8]; u8 rsv2[8];
}; };
...@@ -825,7 +839,7 @@ struct hclge_vport_vtag_rx_cfg_cmd { ...@@ -825,7 +839,7 @@ struct hclge_vport_vtag_rx_cfg_cmd {
u8 vport_vlan_cfg; u8 vport_vlan_cfg;
u8 vf_offset; u8 vf_offset;
u8 rsv1[6]; u8 rsv1[6];
u8 vf_bitmap[8]; u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8]; u8 rsv2[8];
}; };
...@@ -864,7 +878,7 @@ struct hclge_mac_ethertype_idx_rd_cmd { ...@@ -864,7 +878,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags; u8 flags;
u8 resp_code; u8 resp_code;
__le16 vlan_tag; __le16 vlan_tag;
u8 mac_addr[6]; u8 mac_addr[ETH_ALEN];
__le16 index; __le16 index;
__le16 ethter_type; __le16 ethter_type;
__le16 egress_port; __le16 egress_port;
......
...@@ -87,7 +87,7 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, ...@@ -87,7 +87,7 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (prio_tc[i] >= num_tc) { if (prio_tc[i] >= num_tc) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"prio_tc[%u] checking failed, %u >= num_tc(%u)\n", "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
i, prio_tc[i], num_tc); i, prio_tc[i], num_tc);
return -EINVAL; return -EINVAL;
} }
......
...@@ -169,7 +169,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, ...@@ -169,7 +169,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
if (dfx_message->flag) if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n", dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message, dfx_message->message,
desc->data[i % entries_per_desc]); le32_to_cpu(desc->data[i % entries_per_desc]));
dfx_message++; dfx_message++;
} }
...@@ -237,44 +237,48 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf) ...@@ -237,44 +237,48 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
if (ret) if (ret)
return; return;
dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]); dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT); ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
if (ret) if (ret)
return; return;
dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]); dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS); ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
if (ret) if (ret)
return; return;
dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]); dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]); dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]); dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]); le32_to_cpu(desc[0].data[3]));
dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]); dev_info(dev, "tx_private_waterline: 0x%x\n",
dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]); le32_to_cpu(desc[0].data[4]));
dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]); dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_CNT); HCLGE_OPC_TM_INTERNAL_CNT);
if (ret) if (ret)
return; return;
dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]); dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]); dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
HCLGE_OPC_TM_INTERNAL_STS_1); HCLGE_OPC_TM_INTERNAL_STS_1);
if (ret) if (ret)
return; return;
dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]); dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]); dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]); dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]); dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]); le32_to_cpu(desc[0].data[4]));
dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
le32_to_cpu(desc[0].data[5]));
} }
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf) static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
...@@ -364,7 +368,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -364,7 +368,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id); dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n", dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
pg_shap_cfg_cmd->pg_shapping_para); le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
cmd = HCLGE_OPC_TM_PG_P_SHAPPING; cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -375,7 +379,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -375,7 +379,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
pg_shap_cfg_cmd->pg_shapping_para); le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
cmd = HCLGE_OPC_TM_PORT_SHAPPING; cmd = HCLGE_OPC_TM_PORT_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -385,7 +389,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -385,7 +389,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
port_shap_cfg_cmd->port_shapping_para); le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -393,7 +397,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -393,7 +397,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret) if (ret)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]); dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
le32_to_cpu(desc.data[0]));
cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG; cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -401,7 +406,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -401,7 +406,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret) if (ret)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]); dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
le32_to_cpu(desc.data[0]));
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -409,7 +415,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -409,7 +415,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
if (ret) if (ret)
goto err_tm_pg_cmd_send; goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]); dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
le32_to_cpu(desc.data[0]));
if (!hnae3_dev_dcb_supported(hdev)) { if (!hnae3_dev_dcb_supported(hdev)) {
dev_info(&hdev->pdev->dev, dev_info(&hdev->pdev->dev,
...@@ -429,7 +436,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) ...@@ -429,7 +436,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n", dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id); bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
bp_to_qs_map_cmd->qs_bit_map); le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
return; return;
err_tm_pg_cmd_send: err_tm_pg_cmd_send:
...@@ -471,7 +478,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) ...@@ -471,7 +478,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data; qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n", dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
qs_to_pri_map->qs_id); le16_to_cpu(qs_to_pri_map->qs_id));
dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n", dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
qs_to_pri_map->priority); qs_to_pri_map->priority);
dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n", dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
...@@ -484,9 +491,10 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) ...@@ -484,9 +491,10 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
goto err_tm_cmd_send; goto err_tm_cmd_send;
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id); dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
le16_to_cpu(nq_to_qs_map->nq_id));
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n", dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
nq_to_qs_map->qset_id); le16_to_cpu(nq_to_qs_map->qset_id));
cmd = HCLGE_OPC_TM_PG_WEIGHT; cmd = HCLGE_OPC_TM_PG_WEIGHT;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -505,7 +513,8 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) ...@@ -505,7 +513,8 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
goto err_tm_cmd_send; goto err_tm_cmd_send;
qs_weight = (struct hclge_qs_weight_cmd *)desc.data; qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id); dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
le16_to_cpu(qs_weight->qs_id));
dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr); dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
cmd = HCLGE_OPC_TM_PRI_WEIGHT; cmd = HCLGE_OPC_TM_PRI_WEIGHT;
...@@ -527,7 +536,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) ...@@ -527,7 +536,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
shap_cfg_cmd->pri_shapping_para); le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true); hclge_cmd_setup_basic_desc(&desc, cmd, true);
...@@ -538,7 +547,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) ...@@ -538,7 +547,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
shap_cfg_cmd->pri_shapping_para); le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
hclge_dbg_dump_tm_pg(hdev); hclge_dbg_dump_tm_pg(hdev);
...@@ -658,7 +667,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) ...@@ -658,7 +667,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n", dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
pause_param->pause_trans_gap); pause_param->pause_trans_gap);
dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n", dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
pause_param->pause_trans_time); le16_to_cpu(pause_param->pause_trans_time));
} }
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
...@@ -712,7 +721,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) ...@@ -712,7 +721,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
tx_buf_cmd->tx_pkt_buff[i]); le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true); hclge_cmd_setup_basic_desc(desc, cmd, true);
...@@ -724,10 +733,10 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) ...@@ -724,10 +733,10 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
rx_buf_cmd->buf_num[i]); le16_to_cpu(rx_buf_cmd->buf_num[i]));
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
rx_buf_cmd->shared_buf); le16_to_cpu(rx_buf_cmd->shared_buf));
cmd = HCLGE_OPC_RX_COM_WL_ALLOC; cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true); hclge_cmd_setup_basic_desc(desc, cmd, true);
...@@ -738,7 +747,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) ...@@ -738,7 +747,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev, "\n"); dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); le16_to_cpu(rx_com_wl->com_wl.high),
le16_to_cpu(rx_com_wl->com_wl.low));
cmd = HCLGE_OPC_RX_GBL_PKT_CNT; cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
hclge_cmd_setup_basic_desc(desc, cmd, true); hclge_cmd_setup_basic_desc(desc, cmd, true);
...@@ -749,7 +759,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) ...@@ -749,7 +759,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev, dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); le16_to_cpu(rx_packet_cnt->com_wl.high),
le16_to_cpu(rx_packet_cnt->com_wl.low));
dev_info(&hdev->pdev->dev, "\n"); dev_info(&hdev->pdev->dev, "\n");
if (!hnae3_dev_dcb_supported(hdev)) { if (!hnae3_dev_dcb_supported(hdev)) {
...@@ -769,14 +780,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) ...@@ -769,14 +780,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev, dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev, dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC, i + HCLGE_TC_NUM_ONE_DESC,
rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true); hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
...@@ -791,16 +804,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) ...@@ -791,16 +804,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev, dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
rx_com_thrd->com_thrd[i].high, le16_to_cpu(rx_com_thrd->com_thrd[i].high),
rx_com_thrd->com_thrd[i].low); le16_to_cpu(rx_com_thrd->com_thrd[i].low));
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev, dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC, i + HCLGE_TC_NUM_ONE_DESC,
rx_com_thrd->com_thrd[i].high, le16_to_cpu(rx_com_thrd->com_thrd[i].high),
rx_com_thrd->com_thrd[i].low); le16_to_cpu(rx_com_thrd->com_thrd[i].low));
return; return;
err_qos_cmd_send: err_qos_cmd_send:
...@@ -845,7 +858,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) ...@@ -845,7 +858,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN, snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|", "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
req0->index, req0->mac_addr[0], req0->mac_addr[1], le16_to_cpu(req0->index),
req0->mac_addr[0], req0->mac_addr[1],
req0->mac_addr[2], req0->mac_addr[3], req0->mac_addr[2], req0->mac_addr[3],
req0->mac_addr[4], req0->mac_addr[5]); req0->mac_addr[4], req0->mac_addr[5]);
...@@ -929,7 +943,7 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) ...@@ -929,7 +943,7 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
} }
} }
static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{ {
dev_info(&hdev->pdev->dev, "PF reset count: %u\n", dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
hdev->rst_stats.pf_rst_cnt); hdev->rst_stats.pf_rst_cnt);
...@@ -945,8 +959,6 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) ...@@ -945,8 +959,6 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hdev->rst_stats.hw_reset_done_cnt); hdev->rst_stats.hw_reset_done_cnt);
dev_info(&hdev->pdev->dev, "reset count: %u\n", dev_info(&hdev->pdev->dev, "reset count: %u\n",
hdev->rst_stats.reset_cnt); hdev->rst_stats.reset_cnt);
dev_info(&hdev->pdev->dev, "reset count: %u\n",
hdev->rst_stats.reset_cnt);
dev_info(&hdev->pdev->dev, "reset fail count: %u\n", dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
hdev->rst_stats.reset_fail_cnt); hdev->rst_stats.reset_fail_cnt);
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
...@@ -961,6 +973,7 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) ...@@ -961,6 +973,7 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
} }
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
......
...@@ -1747,7 +1747,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev, ...@@ -1747,7 +1747,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
if (vf_id) { if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) { if (vf_id >= hdev->num_alloc_vport) {
dev_err(dev, "invalid vf id(%d)\n", vf_id); dev_err(dev, "invalid vf id(%u)\n", vf_id);
return; return;
} }
......
...@@ -1398,7 +1398,7 @@ static int hclge_configure(struct hclge_dev *hdev) ...@@ -1398,7 +1398,7 @@ static int hclge_configure(struct hclge_dev *hdev)
if ((hdev->tc_max > HNAE3_MAX_TC) || if ((hdev->tc_max > HNAE3_MAX_TC) ||
(hdev->tc_max < 1)) { (hdev->tc_max < 1)) {
dev_warn(&hdev->pdev->dev, "TC num = %d.\n", dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
hdev->tc_max); hdev->tc_max);
hdev->tc_max = 1; hdev->tc_max = 1;
} }
...@@ -1658,7 +1658,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) ...@@ -1658,7 +1658,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
if (hdev->num_tqps < num_vport) { if (hdev->num_tqps < num_vport) {
dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
hdev->num_tqps, num_vport); hdev->num_tqps, num_vport);
return -EINVAL; return -EINVAL;
} }
...@@ -2345,7 +2345,7 @@ static int hclge_init_msi(struct hclge_dev *hdev) ...@@ -2345,7 +2345,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
} }
if (vectors < hdev->num_msi) if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors); hdev->num_msi, vectors);
hdev->num_msi = vectors; hdev->num_msi = vectors;
...@@ -2777,7 +2777,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac) ...@@ -2777,7 +2777,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac)
else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
mac->module_type = HNAE3_MODULE_TYPE_TP; mac->module_type = HNAE3_MODULE_TYPE_TP;
if (mac->support_autoneg == true) { if (mac->support_autoneg) {
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
linkmode_copy(mac->advertising, mac->supported); linkmode_copy(mac->advertising, mac->supported);
} else { } else {
...@@ -3280,7 +3280,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev) ...@@ -3280,7 +3280,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"flr wait timeout: %d\n", cnt); "flr wait timeout: %u\n", cnt);
return -EBUSY; return -EBUSY;
} }
...@@ -3330,7 +3330,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) ...@@ -3330,7 +3330,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"set vf(%d) rst failed %d!\n", "set vf(%u) rst failed %d!\n",
vport->vport_id, ret); vport->vport_id, ret);
return ret; return ret;
} }
...@@ -3345,7 +3345,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) ...@@ -3345,7 +3345,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
ret = hclge_inform_reset_assert_to_vf(vport); ret = hclge_inform_reset_assert_to_vf(vport);
if (ret) if (ret)
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"inform reset to vf(%d) failed %d!\n", "inform reset to vf(%u) failed %d!\n",
vport->vport_id, ret); vport->vport_id, ret);
} }
...@@ -3658,7 +3658,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) ...@@ -3658,7 +3658,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hdev->rst_stats.reset_fail_cnt++; hdev->rst_stats.reset_fail_cnt++;
set_bit(hdev->reset_type, &hdev->reset_pending); set_bit(hdev->reset_type, &hdev->reset_pending);
dev_info(&hdev->pdev->dev, dev_info(&hdev->pdev->dev,
"re-schedule reset task(%d)\n", "re-schedule reset task(%u)\n",
hdev->rst_stats.reset_fail_cnt); hdev->rst_stats.reset_fail_cnt);
return true; return true;
} }
...@@ -3669,6 +3669,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) ...@@ -3669,6 +3669,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
hclge_reset_handshake(hdev, true); hclge_reset_handshake(hdev, true);
dev_err(&hdev->pdev->dev, "Reset fail!\n"); dev_err(&hdev->pdev->dev, "Reset fail!\n");
hclge_dbg_dump_rst_info(hdev);
return false; return false;
} }
...@@ -3852,12 +3855,13 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) ...@@ -3852,12 +3855,13 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
HCLGE_RESET_INTERVAL))) { HCLGE_RESET_INTERVAL))) {
mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return; return;
} else if (hdev->default_reset_request) } else if (hdev->default_reset_request) {
hdev->reset_level = hdev->reset_level =
hclge_get_reset_level(ae_dev, hclge_get_reset_level(ae_dev,
&hdev->default_reset_request); &hdev->default_reset_request);
else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
hdev->reset_level = HNAE3_FUNC_RESET; hdev->reset_level = HNAE3_FUNC_RESET;
}
dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
hdev->reset_level); hdev->reset_level);
...@@ -3982,6 +3986,7 @@ static void hclge_service_task(struct work_struct *work) ...@@ -3982,6 +3986,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_link_status(hdev); hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev); hclge_update_vport_alive(hdev);
hclge_sync_vlan_filter(hdev); hclge_sync_vlan_filter(hdev);
if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) { if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
hclge_rfs_filter_expire(hdev); hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0; hdev->fd_arfs_expire_timer = 0;
...@@ -4488,7 +4493,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) ...@@ -4488,7 +4493,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
*/ */
if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Configure rss tc size failed, invalid TC_SIZE = %d\n", "Configure rss tc size failed, invalid TC_SIZE = %u\n",
rss_size); rss_size);
return -EINVAL; return -EINVAL;
} }
...@@ -4838,7 +4843,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) ...@@ -4838,7 +4843,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
break; break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Unsupported flow director mode %d\n", "Unsupported flow director mode %u\n",
hdev->fd_cfg.fd_mode); hdev->fd_cfg.fd_mode);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -5168,7 +5173,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, ...@@ -5168,7 +5173,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true); true);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"fd key_y config fail, loc=%d, ret=%d\n", "fd key_y config fail, loc=%u, ret=%d\n",
rule->queue_id, ret); rule->queue_id, ret);
return ret; return ret;
} }
...@@ -5177,7 +5182,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, ...@@ -5177,7 +5182,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
true); true);
if (ret) if (ret)
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"fd key_x config fail, loc=%d, ret=%d\n", "fd key_x config fail, loc=%u, ret=%d\n",
rule->queue_id, ret); rule->queue_id, ret);
return ret; return ret;
} }
...@@ -5426,7 +5431,7 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev, ...@@ -5426,7 +5431,7 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
} }
} else if (!is_add) { } else if (!is_add) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"delete fail, rule %d is inexistent\n", "delete fail, rule %u is inexistent\n",
location); location);
return -EINVAL; return -EINVAL;
} }
...@@ -5666,7 +5671,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, ...@@ -5666,7 +5671,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (vf > hdev->num_req_vfs) { if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Error: vf id (%d) > max vf num (%d)\n", "Error: vf id (%u) > max vf num (%u)\n",
vf, hdev->num_req_vfs); vf, hdev->num_req_vfs);
return -EINVAL; return -EINVAL;
} }
...@@ -5676,7 +5681,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, ...@@ -5676,7 +5681,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
if (ring >= tqps) { if (ring >= tqps) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Error: queue id (%d) > max tqp num (%d)\n", "Error: queue id (%u) > max tqp num (%u)\n",
ring, tqps - 1); ring, tqps - 1);
return -EINVAL; return -EINVAL;
} }
...@@ -5735,7 +5740,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, ...@@ -5735,7 +5740,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
if (!hclge_fd_rule_exist(hdev, fs->location)) { if (!hclge_fd_rule_exist(hdev, fs->location)) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Delete fail, rule %d is inexistent\n", fs->location); "Delete fail, rule %u is inexistent\n", fs->location);
return -ENOENT; return -ENOENT;
} }
...@@ -5812,7 +5817,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) ...@@ -5812,7 +5817,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
if (ret) { if (ret) {
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"Restore rule %d failed, remove it\n", "Restore rule %u failed, remove it\n",
rule->location); rule->location);
clear_bit(rule->location, hdev->fd_bmap); clear_bit(rule->location, hdev->fd_bmap);
hlist_del(&rule->rule_node); hlist_del(&rule->rule_node);
...@@ -6805,7 +6810,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, ...@@ -6805,7 +6810,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (cmdq_resp) { if (cmdq_resp) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
cmdq_resp); cmdq_resp);
return -EIO; return -EIO;
} }
...@@ -7057,7 +7062,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev) ...@@ -7057,7 +7062,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
if (allocated_size < hdev->wanted_umv_size) if (allocated_size < hdev->wanted_umv_size)
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"Alloc umv space failed, want %d, get %d\n", "Alloc umv space failed, want %u, get %u\n",
hdev->wanted_umv_size, allocated_size); hdev->wanted_umv_size, allocated_size);
mutex_init(&hdev->umv_mutex); mutex_init(&hdev->umv_mutex);
...@@ -7225,7 +7230,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, ...@@ -7225,7 +7230,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
/* check if we just hit the duplicate */ /* check if we just hit the duplicate */
if (!ret) { if (!ret) {
dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n", dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
vport->vport_id, addr); vport->vport_id, addr);
return 0; return 0;
} }
...@@ -7406,7 +7411,7 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, ...@@ -7406,7 +7411,7 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
list_for_each_entry_safe(mac_cfg, tmp, list, node) { list_for_each_entry_safe(mac_cfg, tmp, list, node) {
if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) { if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
if (uc_flag && mac_cfg->hd_tbl_status) if (uc_flag && mac_cfg->hd_tbl_status)
hclge_rm_uc_addr_common(vport, mac_addr); hclge_rm_uc_addr_common(vport, mac_addr);
...@@ -7478,7 +7483,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, ...@@ -7478,7 +7483,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
if (cmdq_resp) { if (cmdq_resp) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
cmdq_resp); cmdq_resp);
return -EIO; return -EIO;
} }
...@@ -7500,7 +7505,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, ...@@ -7500,7 +7505,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
break; break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"add mac ethertype failed for undefined, code=%d.\n", "add mac ethertype failed for undefined, code=%u.\n",
resp_code); resp_code);
return_status = -EIO; return_status = -EIO;
} }
...@@ -7741,8 +7746,6 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, ...@@ -7741,8 +7746,6 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan, bool is_kill, u16 vlan,
__be16 proto) __be16 proto)
{ {
#define HCLGE_MAX_VF_BYTES 16
struct hclge_vport *vport = &hdev->vport[vfid]; struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0; struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1; struct hclge_vlan_filter_vf_cfg_cmd *req1;
...@@ -7807,7 +7810,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, ...@@ -7807,7 +7810,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
} }
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%d.\n", "Add vf vlan filter fail, ret =%u.\n",
req0->resp_code); req0->resp_code);
} else { } else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
...@@ -7823,7 +7826,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, ...@@ -7823,7 +7826,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return 0; return 0;
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%d.\n", "Kill vf vlan filter fail, ret =%u.\n",
req0->resp_code); req0->resp_code);
} }
...@@ -7842,9 +7845,10 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, ...@@ -7842,9 +7845,10 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
vlan_offset_160 = vlan_id / 160; vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
vlan_offset_byte = (vlan_id % 160) / 8; vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
vlan_offset_byte_val = 1 << (vlan_id % 8); HCLGE_VLAN_BYTE_SIZE;
vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset_160; req->vlan_offset = vlan_offset_160;
...@@ -7872,7 +7876,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, ...@@ -7872,7 +7876,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
proto); proto);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Set %d vport vlan filter config fail, ret =%d.\n", "Set %u vport vlan filter config fail, ret =%d.\n",
vport_id, ret); vport_id, ret);
return ret; return ret;
} }
...@@ -7884,7 +7888,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, ...@@ -7884,7 +7888,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Add port vlan failed, vport %d is already in vlan %d\n", "Add port vlan failed, vport %u is already in vlan %u\n",
vport_id, vlan_id); vport_id, vlan_id);
return -EINVAL; return -EINVAL;
} }
...@@ -7892,7 +7896,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, ...@@ -7892,7 +7896,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill && if (is_kill &&
!test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"Delete port vlan failed, vport %d is not in vlan %d\n", "Delete port vlan failed, vport %u is not in vlan %u\n",
vport_id, vlan_id); vport_id, vlan_id);
return -EINVAL; return -EINVAL;
} }
...@@ -8548,6 +8552,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) ...@@ -8548,6 +8552,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
int i, max_frm_size, ret; int i, max_frm_size, ret;
/* HW supprt 2 layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME || if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
max_frm_size > HCLGE_MAC_MAX_FRAME) max_frm_size > HCLGE_MAC_MAX_FRAME)
...@@ -8963,16 +8968,16 @@ static void hclge_info_show(struct hclge_dev *hdev) ...@@ -8963,16 +8968,16 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "PF info begin:\n"); dev_info(dev, "PF info begin:\n");
dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport); dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs); dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size); dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size); dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size); dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
dev_info(dev, "This is %s PF\n", dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n", dev_info(dev, "DCB %s\n",
...@@ -8988,10 +8993,9 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, ...@@ -8988,10 +8993,9 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{ {
struct hnae3_client *client = vport->nic.client; struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv; struct hclge_dev *hdev = ae_dev->priv;
int rst_cnt; int rst_cnt = hdev->rst_stats.reset_cnt;
int ret; int ret;
rst_cnt = hdev->rst_stats.reset_cnt;
ret = client->ops->init_instance(&vport->nic); ret = client->ops->init_instance(&vport->nic);
if (ret) if (ret)
return ret; return ret;
...@@ -9091,7 +9095,6 @@ static int hclge_init_client_instance(struct hnae3_client *client, ...@@ -9091,7 +9095,6 @@ static int hclge_init_client_instance(struct hnae3_client *client,
switch (client->type) { switch (client->type) {
case HNAE3_CLIENT_KNIC: case HNAE3_CLIENT_KNIC:
hdev->nic_client = client; hdev->nic_client = client;
vport->nic.client = client; vport->nic.client = client;
ret = hclge_init_nic_client_instance(ae_dev, vport); ret = hclge_init_nic_client_instance(ae_dev, vport);
...@@ -9290,7 +9293,7 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev) ...@@ -9290,7 +9293,7 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
ret = hclge_set_vf_rst(hdev, vport->vport_id, false); ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret) if (ret)
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"clear vf(%d) rst failed %d!\n", "clear vf(%u) rst failed %d!\n",
vport->vport_id, ret); vport->vport_id, ret);
} }
} }
...@@ -9312,6 +9315,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -9312,6 +9315,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->reset_type = HNAE3_NONE_RESET; hdev->reset_type = HNAE3_NONE_RESET;
hdev->reset_level = HNAE3_FUNC_RESET; hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev; ae_dev->priv = hdev;
/* HW supprt 2 layer vlan */
hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
mutex_init(&hdev->vport_lock); mutex_init(&hdev->vport_lock);
...@@ -9909,8 +9914,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, ...@@ -9909,8 +9914,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
int cur_rss_size = kinfo->rss_size; u16 cur_rss_size = kinfo->rss_size;
int cur_tqps = kinfo->num_tqps; u16 cur_tqps = kinfo->num_tqps;
u16 tc_valid[HCLGE_MAX_TC_NUM]; u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 roundup_size; u16 roundup_size;
u32 *rss_indir; u32 *rss_indir;
...@@ -9964,7 +9969,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, ...@@ -9964,7 +9969,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
out: out:
if (!ret) if (!ret)
dev_info(&hdev->pdev->dev, dev_info(&hdev->pdev->dev,
"Channels changed, rss_size from %d to %d, tqps from %d to %d", "Channels changed, rss_size from %u to %u, tqps from %u to %u",
cur_rss_size, kinfo->rss_size, cur_rss_size, kinfo->rss_size,
cur_tqps, kinfo->rss_size * kinfo->num_tc); cur_tqps, kinfo->rss_size * kinfo->num_tc);
......
...@@ -141,7 +141,6 @@ ...@@ -141,7 +141,6 @@
/* Factor used to calculate offset and bitmap of VF num */ /* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64 #define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8
enum HLCGE_PORT_TYPE { enum HLCGE_PORT_TYPE {
HOST_PORT, HOST_PORT,
...@@ -656,7 +655,6 @@ struct hclge_rst_stats { ...@@ -656,7 +655,6 @@ struct hclge_rst_stats {
u32 hw_reset_done_cnt; /* the number of HW reset has completed */ u32 hw_reset_done_cnt; /* the number of HW reset has completed */
u32 pf_rst_cnt; /* the number of PF reset */ u32 pf_rst_cnt; /* the number of PF reset */
u32 flr_rst_cnt; /* the number of FLR */ u32 flr_rst_cnt; /* the number of FLR */
u32 core_rst_cnt; /* the number of CORE reset */
u32 global_rst_cnt; /* the number of GLOBAL */ u32 global_rst_cnt; /* the number of GLOBAL */
u32 imp_rst_cnt; /* the number of IMP reset */ u32 imp_rst_cnt; /* the number of IMP reset */
u32 reset_cnt; /* the number of reset */ u32 reset_cnt; /* the number of reset */
...@@ -1005,4 +1003,5 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, ...@@ -1005,4 +1003,5 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
void hclge_report_hw_error(struct hclge_dev *hdev, void hclge_report_hw_error(struct hclge_dev *hdev,
enum hnae3_hw_error_type type); enum hnae3_hw_error_type type);
void hclge_inform_vf_promisc_info(struct hclge_vport *vport); void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
#endif #endif
...@@ -26,7 +26,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, ...@@ -26,7 +26,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"PF fail to gen resp to VF len %d exceeds max len %d\n", "PF fail to gen resp to VF len %u exceeds max len %u\n",
resp_data_len, resp_data_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE); HCLGE_MBX_MAX_RESP_DATA_SIZE);
/* If resp_data_len is too long, set the value to max length /* If resp_data_len is too long, set the value to max length
...@@ -285,7 +285,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, ...@@ -285,7 +285,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
false, HCLGE_MAC_ADDR_UC); false, HCLGE_MAC_ADDR_UC);
} else { } else {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %d\n", "failed to set unicast mac addr, unknown subcode %u\n",
mbx_req->msg[1]); mbx_req->msg[1]);
return -EIO; return -EIO;
} }
...@@ -319,7 +319,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, ...@@ -319,7 +319,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
false, HCLGE_MAC_ADDR_MC); false, HCLGE_MAC_ADDR_MC);
} else { } else {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n", "failed to set mcast mac addr, unknown subcode %u\n",
mbx_req->msg[1]); mbx_req->msg[1]);
return -EIO; return -EIO;
} }
...@@ -555,7 +555,7 @@ static void hclge_reset_vf(struct hclge_vport *vport, ...@@ -555,7 +555,7 @@ static void hclge_reset_vf(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
int ret; int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!", dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
vport->vport_id); vport->vport_id);
ret = hclge_func_reset_cmd(hdev, vport->vport_id); ret = hclge_func_reset_cmd(hdev, vport->vport_id);
...@@ -590,7 +590,8 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, ...@@ -590,7 +590,8 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf)); memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2); return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
sizeof(resp_data));
} }
static int hclge_get_rss_key(struct hclge_vport *vport, static int hclge_get_rss_key(struct hclge_vport *vport,
...@@ -680,7 +681,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -680,7 +681,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n", "dropped invalid mailbox message, code = %u\n",
req->msg[0]); req->msg[0]);
/* dropping/not processing this invalid message */ /* dropping/not processing this invalid message */
...@@ -827,7 +828,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -827,7 +828,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
break; break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n", "un-supported mailbox message, code = %u\n",
req->msg[0]); req->msg[0]);
break; break;
} }
......
...@@ -134,7 +134,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev) ...@@ -134,7 +134,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
"no phy device is connected to mdio bus\n"); "no phy device is connected to mdio bus\n");
return 0; return 0;
} else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n", dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n",
hdev->hw.mac.phy_addr); hdev->hw.mac.phy_addr);
return -EINVAL; return -EINVAL;
} }
......
...@@ -544,7 +544,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) ...@@ -544,7 +544,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"vf%d, qs%u failed to set tx_rate:%d, ret=%d\n", "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
vport->vport_id, shap_cfg_cmd->qs_id, vport->vport_id, shap_cfg_cmd->qs_id,
max_tx_rate, ret); max_tx_rate, ret);
return ret; return ret;
...@@ -575,7 +575,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) ...@@ -575,7 +575,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
/* Set to user value, no larger than max_rss_size. */ /* Set to user value, no larger than max_rss_size. */
if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
kinfo->req_rss_size <= max_rss_size) { kinfo->req_rss_size <= max_rss_size) {
dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
kinfo->rss_size, kinfo->req_rss_size); kinfo->rss_size, kinfo->req_rss_size);
kinfo->rss_size = kinfo->req_rss_size; kinfo->rss_size = kinfo->req_rss_size;
} else if (kinfo->rss_size > max_rss_size || } else if (kinfo->rss_size > max_rss_size ||
......
...@@ -50,7 +50,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) ...@@ -50,7 +50,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
rmb(); /* Make sure head is ready before touch any data */ rmb(); /* Make sure head is ready before touch any data */
if (!hclgevf_is_valid_csq_clean_head(csq, head)) { if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
csq->next_to_use, csq->next_to_clean); csq->next_to_use, csq->next_to_clean);
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"Disabling any further commands to IMP firmware\n"); "Disabling any further commands to IMP firmware\n");
...@@ -92,9 +92,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) ...@@ -92,9 +92,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
u32 reg_val; u32 reg_val;
if (ring->flag == HCLGEVF_TYPE_CSQ) { if (ring->flag == HCLGEVF_TYPE_CSQ) {
reg_val = (u32)ring->desc_dma_addr; reg_val = lower_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); reg_val = upper_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG); reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
...@@ -105,9 +105,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) ...@@ -105,9 +105,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
} else { } else {
reg_val = (u32)ring->desc_dma_addr; reg_val = lower_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); reg_val = upper_32_bits(ring->desc_dma_addr);
hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
......
...@@ -1549,12 +1549,39 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) ...@@ -1549,12 +1549,39 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
return ret; return ret;
} }
static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
{
dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
hdev->rst_stats.vf_func_rst_cnt);
dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
hdev->rst_stats.flr_rst_cnt);
dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
hdev->rst_stats.vf_rst_cnt);
dev_info(&hdev->pdev->dev, "reset done count: %u\n",
hdev->rst_stats.rst_done_cnt);
dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
hdev->rst_stats.hw_rst_done_cnt);
dev_info(&hdev->pdev->dev, "reset count: %u\n",
hdev->rst_stats.rst_cnt);
dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
hdev->rst_stats.rst_fail_cnt);
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG));
dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
}
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{ {
/* recover handshake status with IMP when reset fail */ /* recover handshake status with IMP when reset fail */
hclgevf_reset_handshake(hdev, true); hclgevf_reset_handshake(hdev, true);
hdev->rst_stats.rst_fail_cnt++; hdev->rst_stats.rst_fail_cnt++;
dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n", dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
hdev->rst_stats.rst_fail_cnt); hdev->rst_stats.rst_fail_cnt);
if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
...@@ -1563,6 +1590,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) ...@@ -1563,6 +1590,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
if (hclgevf_is_reset_pending(hdev)) { if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev); hclgevf_reset_task_schedule(hdev);
} else {
hclgevf_dump_rst_info(hdev);
} }
} }
...@@ -1784,6 +1813,8 @@ static void hclgevf_service_timer(struct timer_list *t) ...@@ -1784,6 +1813,8 @@ static void hclgevf_service_timer(struct timer_list *t)
static void hclgevf_reset_service_task(struct work_struct *work) static void hclgevf_reset_service_task(struct work_struct *work)
{ {
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
struct hclgevf_dev *hdev = struct hclgevf_dev *hdev =
container_of(work, struct hclgevf_dev, rst_service_task); container_of(work, struct hclgevf_dev, rst_service_task);
int ret; int ret;
...@@ -1836,7 +1867,7 @@ static void hclgevf_reset_service_task(struct work_struct *work) ...@@ -1836,7 +1867,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* We cannot do much for 2. but to check first we can try reset * We cannot do much for 2. but to check first we can try reset
* our PCIe + stack and see if it alleviates the problem. * our PCIe + stack and see if it alleviates the problem.
*/ */
if (hdev->reset_attempts > 3) { if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */ /* prepare for full reset of stack + pcie interface */
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
...@@ -2139,7 +2170,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ...@@ -2139,7 +2170,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
if (ret) if (ret)
return ret; return ret;
} }
/* Initialize RSS indirect table */ /* Initialize RSS indirect table */
...@@ -2308,7 +2338,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) ...@@ -2308,7 +2338,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
} }
if (vectors < hdev->num_msi) if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors); hdev->num_msi, vectors);
hdev->num_msi = vectors; hdev->num_msi = vectors;
...@@ -2384,12 +2414,12 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev) ...@@ -2384,12 +2414,12 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev)
dev_info(dev, "VF info begin:\n"); dev_info(dev, "VF info begin:\n");
dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
dev_info(dev, "PF media type of this VF: %d\n", dev_info(dev, "PF media type of this VF: %u\n",
hdev->hw.mac.media_type); hdev->hw.mac.media_type);
dev_info(dev, "VF info end.\n"); dev_info(dev, "VF info end.\n");
......
...@@ -33,7 +33,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, ...@@ -33,7 +33,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF mbx response len(=%d) exceeds maximum(=%d)\n", "VF mbx response len(=%u) exceeds maximum(=%u)\n",
resp_len, resp_len,
HCLGE_MBX_MAX_RESP_DATA_SIZE); HCLGE_MBX_MAX_RESP_DATA_SIZE);
return -EINVAL; return -EINVAL;
...@@ -49,7 +49,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, ...@@ -49,7 +49,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (i >= HCLGEVF_MAX_TRY_TIMES) { if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n", "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
code0, code1, hdev->mbx_resp.received_resp, i); code0, code1, hdev->mbx_resp.received_resp, i);
return -EIO; return -EIO;
} }
...@@ -68,10 +68,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, ...@@ -68,10 +68,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) { if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF could not match resp code(code0=%d,code1=%d), %d\n", "VF could not match resp code(code0=%u,code1=%u), %d\n",
code0, code1, mbx_resp->resp_status); code0, code1, mbx_resp->resp_status);
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF could not match resp r_code(r_code0=%d,r_code1=%d)\n", "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
r_code0, r_code1); r_code0, r_code1);
return -EIO; return -EIO;
} }
...@@ -168,7 +168,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) ...@@ -168,7 +168,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n", "dropped invalid mailbox message, code = %u\n",
req->msg[0]); req->msg[0]);
/* dropping/not processing this invalid message */ /* dropping/not processing this invalid message */
...@@ -187,7 +187,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) ...@@ -187,7 +187,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_PF_VF_RESP: case HCLGE_MBX_PF_VF_RESP:
if (resp->received_resp) if (resp->received_resp)
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"VF mbx resp flag not clear(%d)\n", "VF mbx resp flag not clear(%u)\n",
req->msg[1]); req->msg[1]);
resp->received_resp = true; resp->received_resp = true;
...@@ -219,7 +219,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) ...@@ -219,7 +219,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
if (atomic_read(&hdev->arq.count) >= if (atomic_read(&hdev->arq.count) >=
HCLGE_MBX_MAX_ARQ_MSG_NUM) { HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%d)\n", "Async Q full, dropping msg(%u)\n",
req->msg[1]); req->msg[1]);
break; break;
} }
...@@ -236,7 +236,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) ...@@ -236,7 +236,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
break; break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"VF received unsupported(%d) mbx msg from PF\n", "VF received unsupported(%u) mbx msg from PF\n",
req->msg[0]); req->msg[0]);
break; break;
} }
...@@ -327,7 +327,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) ...@@ -327,7 +327,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
break; break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"fetched unsupported(%d) message from arq\n", "fetched unsupported(%u) message from arq\n",
msg_q[0]); msg_q[0]);
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment