Commit 603d11c4 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: updates for -next

To facilitate code maintenance and compatibility, #1 and #2 add a
device version to replace the pci revision, and #3 to #9 add support
for querying device capabilities and specifications, so the driver
can use the query results to implement the corresponding features
(some features will be implemented later).

And #10 is a minor cleanup, since hclge_shaper_para_calc() took too
many parameters.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 090bc03b ff7e4d0d
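
For orientation before the diff: patches #1 and #2 derive a 32-bit device version from the hardware version reported by firmware plus the PCI revision, and feature checks then compare against the HNAE3_DEVICE_VERSION_* constants instead of pdev->revision. A minimal sketch of that composition, using names introduced in this series (the helper function itself is illustrative, not part of the patches):

/* Illustrative only: compose dev_version the way hclge/hclgevf do in
 * hclge_cmd_query_version_and_capability() below, then gate features
 * on it instead of on pdev->revision.
 */
static void example_fill_dev_version(struct hnae3_ae_dev *ae_dev,
				     u32 fw_hw_version, u8 pci_revision)
{
	/* upper bits: hardware version from firmware; low 8 bits: PCI revision */
	ae_dev->dev_version = fw_hw_version << HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= pci_revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		; /* e.g. revision 0x21 class hardware: GRO_HW, FD, FEC, ... */
}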
@@ -34,6 +34,13 @@
#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
/* Device version */
#define HNAE3_DEVICE_VERSION_V1 0x00020
#define HNAE3_DEVICE_VERSION_V2 0x00021
#define HNAE3_DEVICE_VERSION_V3 0x00030
#define HNAE3_PCI_REVISION_BIT_SIZE 8
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
@@ -54,8 +61,6 @@
#define HNAE3_KNIC_CLIENT_INITED_B 0x3
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -66,11 +71,64 @@
#define hnae3_dev_dcb_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_FD_B,
HNAE3_DEV_SUPPORT_GRO_B,
HNAE3_DEV_SUPPORT_FEC_B,
HNAE3_DEV_SUPPORT_UDP_GSO_B,
HNAE3_DEV_SUPPORT_QB_B,
HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B,
HNAE3_DEV_SUPPORT_PTP_B,
HNAE3_DEV_SUPPORT_INT_QL_B,
HNAE3_DEV_SUPPORT_SIMPLE_BD_B,
HNAE3_DEV_SUPPORT_TX_PUSH_B,
HNAE3_DEV_SUPPORT_PHY_IMP_B,
HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B,
HNAE3_DEV_SUPPORT_HW_PAD_B,
HNAE3_DEV_SUPPORT_STASH_B,
};
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
#define hnae3_dev_gro_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
#define hnae3_dev_fec_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
#define hnae3_dev_udp_gso_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, (hdev)->ae_dev->caps)
#define hnae3_dev_qb_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_QB_B, (hdev)->ae_dev->caps)
#define hnae3_dev_fd_forward_tc_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, (hdev)->ae_dev->caps)
#define hnae3_dev_ptp_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_PTP_B, (hdev)->ae_dev->caps)
#define hnae3_dev_int_ql_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps)
#define hnae3_dev_simple_bd_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_SIMPLE_BD_B, (hdev)->ae_dev->caps)
#define hnae3_dev_tx_push_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps)
#define hnae3_dev_phy_imp_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (hdev)->ae_dev->caps)
#define hnae3_dev_tqp_txrx_indep_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (hdev)->ae_dev->caps)
#define hnae3_dev_hw_pad_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_HW_PAD_B, (hdev)->ae_dev->caps)
#define hnae3_dev_stash_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -209,6 +267,17 @@ struct hnae3_ring_chain_node {
#define HNAE3_IS_TX_RING(node) \
(((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX)
/* device specification info from firmware */
struct hnae3_dev_specs {
u32 mac_entry_num; /* number of mac-vlan table entry */
u32 mng_entry_num; /* number of manager table entry */
u32 max_tm_rate;
u16 rss_ind_tbl_size;
u16 rss_key_size;
u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
};
struct hnae3_client_ops {
int (*init_instance)(struct hnae3_handle *handle);
void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
@@ -229,12 +298,16 @@ struct hnae3_client {
struct list_head node;
};
#define HNAE3_DEV_CAPS_MAX_NUM 96
struct hnae3_ae_dev {
struct pci_dev *pdev;
const struct hnae3_ae_ops *ops;
struct list_head node;
u32 flag;
unsigned long hw_err_reset_req;
struct hnae3_dev_specs dev_specs;
u32 dev_version;
unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)];
void *priv;
};
...
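
A short usage sketch for the capability bitmap added above (illustrative only: the wrapper function is hypothetical, but the macros, the caps bitmap and the dev_specs fields are the ones introduced in hnae3.h):

/* Example: feature paths test ae_dev->caps instead of pdev->revision.
 * 'hdev' is any driver private struct that carries the hnae3_ae_dev.
 */
static void example_setup_optional_features(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	if (hnae3_dev_fec_supported(hdev))
		; /* advertise FEC modes to ethtool */

	if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
		; /* e.g. turn on UDP GSO offload */

	/* specifications queried from firmware (or defaults) live next to caps */
	if (ae_dev->dev_specs.max_non_tso_bd_num)
		; /* size TX BD handling accordingly */
}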
@@ -244,6 +244,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "queue info <number>\n");
dev_info(&h->pdev->dev, "queue map\n");
dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
dev_info(&h->pdev->dev, "dev capability\n");
if (!hns3_is_phys_func(h->pdev))
return;
@@ -285,6 +286,27 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "%s", printf_buf);
}
static void hns3_dbg_dev_caps(struct hnae3_handle *h)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
unsigned long *caps;
caps = ae_dev->caps;
dev_info(&h->pdev->dev, "support FD: %s\n",
test_bit(HNAE3_DEV_SUPPORT_FD_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support GRO: %s\n",
test_bit(HNAE3_DEV_SUPPORT_GRO_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support FEC: %s\n",
test_bit(HNAE3_DEV_SUPPORT_FEC_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support UDP GSO: %s\n",
test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support PTP: %s\n",
test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support INT QL: %s\n",
test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -360,6 +382,8 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
ret = hns3_dbg_queue_map(handle);
else if (strncmp(cmd_buf, "bd info", 7) == 0)
ret = hns3_dbg_bd_info(handle, cmd_buf);
else if (strncmp(cmd_buf, "dev capability", 14) == 0)
hns3_dbg_dev_caps(handle);
else if (handle->ae_algo->ops->dbg_run_cmd)
ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
else
...
@@ -629,9 +629,11 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
bool last_state;
if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
h->ae_algo->ops->enable_vlan_filter) {
last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
if (enable != last_state) {
netdev_info(netdev,
@@ -2074,15 +2076,6 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
}
static void hns3_get_dev_capability(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev)
{
if (pdev->revision >= 0x21) {
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
}
}
/* hns3_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl
@@ -2104,7 +2097,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
ret = hnae3_register_ae_dev(ae_dev);
@@ -2265,6 +2257,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct pci_dev *pdev = h->pdev;
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -2302,7 +2295,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
NETIF_F_FRAGLIST;
if (pdev->revision >= 0x21) {
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
netdev->hw_features |= NETIF_F_GRO_HW;
netdev->features |= NETIF_F_GRO_HW;
@@ -2801,8 +2794,9 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
{
struct hnae3_handle *handle = ring->tqp->handle;
struct pci_dev *pdev = ring->tqp->handle->pdev;
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
if (pdev->revision == 0x20) {
if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
if (!(*vlan_tag & VLAN_VID_MASK))
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
...
...@@ -77,6 +77,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { ...@@ -77,6 +77,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{ {
struct hnae3_handle *h = hns3_get_handle(ndev); struct hnae3_handle *h = hns3_get_handle(ndev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
bool vlan_filter_enable; bool vlan_filter_enable;
int ret; int ret;
...@@ -96,7 +97,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) ...@@ -96,7 +97,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
break; break;
} }
if (ret || h->pdev->revision >= 0x21) if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
return ret; return ret;
if (en) { if (en) {
...@@ -147,6 +148,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) ...@@ -147,6 +148,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
struct net_device *ndev = skb->dev; struct net_device *ndev = skb->dev;
struct hnae3_handle *handle; struct hnae3_handle *handle;
struct hnae3_ae_dev *ae_dev;
unsigned char *packet; unsigned char *packet;
struct ethhdr *ethh; struct ethhdr *ethh;
unsigned int i; unsigned int i;
...@@ -163,7 +165,8 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) ...@@ -163,7 +165,8 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest. * the purpose of mac or serdes selftest.
*/ */
handle = hns3_get_handle(ndev); handle = hns3_get_handle(ndev);
if (handle->pdev->revision == 0x20) ae_dev = pci_get_drvdata(handle->pdev);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR; ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source); eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP); ethh->h_proto = htons(ETH_P_ARP);
...@@ -761,6 +764,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev, ...@@ -761,6 +764,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd) const struct ethtool_link_ksettings *cmd)
{ {
struct hnae3_handle *handle = hns3_get_handle(netdev); struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops; const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
int ret; int ret;
...@@ -782,7 +786,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev, ...@@ -782,7 +786,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
return phy_ethtool_ksettings_set(netdev->phydev, cmd); return phy_ethtool_ksettings_set(netdev->phydev, cmd);
} }
if (handle->pdev->revision == 0x20) if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP; return -EOPNOTSUPP;
ret = hns3_check_ksettings_param(netdev, cmd); ret = hns3_check_ksettings_param(netdev, cmd);
...@@ -846,11 +850,12 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir, ...@@ -846,11 +850,12 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc) const u8 *key, const u8 hfunc)
{ {
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
if (!h->ae_algo->ops->set_rss) if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if ((h->pdev->revision == 0x20 && if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) { hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
netdev_err(netdev, "hash func not supported\n"); netdev_err(netdev, "hash func not supported\n");
...@@ -1071,9 +1076,6 @@ static int hns3_nway_reset(struct net_device *netdev) ...@@ -1071,9 +1076,6 @@ static int hns3_nway_reset(struct net_device *netdev)
if (phy) if (phy)
return genphy_restart_aneg(phy); return genphy_restart_aneg(phy);
if (handle->pdev->revision == 0x20)
return -EOPNOTSUPP;
return ops->restart_autoneg(handle); return ops->restart_autoneg(handle);
} }
...@@ -1361,11 +1363,12 @@ static int hns3_get_fecparam(struct net_device *netdev, ...@@ -1361,11 +1363,12 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec) struct ethtool_fecparam *fec)
{ {
struct hnae3_handle *handle = hns3_get_handle(netdev); struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops; const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 fec_ability; u8 fec_ability;
u8 fec_mode; u8 fec_mode;
if (handle->pdev->revision == 0x20) if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!ops->get_fec) if (!ops->get_fec)
...@@ -1383,10 +1386,11 @@ static int hns3_set_fecparam(struct net_device *netdev, ...@@ -1383,10 +1386,11 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec) struct ethtool_fecparam *fec)
{ {
struct hnae3_handle *handle = hns3_get_handle(netdev); struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops; const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u32 fec_mode; u32 fec_mode;
if (handle->pdev->revision == 0x20) if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!ops->set_fec) if (!ops->set_fec)
...@@ -1404,11 +1408,13 @@ static int hns3_get_module_info(struct net_device *netdev, ...@@ -1404,11 +1408,13 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03 #define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev); struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops; const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
struct hns3_sfp_type sfp_type; struct hns3_sfp_type sfp_type;
int ret; int ret;
if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom) if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!ops->get_module_eeprom)
return -EOPNOTSUPP; return -EOPNOTSUPP;
memset(&sfp_type, 0, sizeof(sfp_type)); memset(&sfp_type, 0, sizeof(sfp_type));
...@@ -1452,9 +1458,11 @@ static int hns3_get_module_eeprom(struct net_device *netdev, ...@@ -1452,9 +1458,11 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data) struct ethtool_eeprom *ee, u8 *data)
{ {
struct hnae3_handle *handle = hns3_get_handle(netdev); struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops; const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom) if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!ops->get_module_eeprom)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!ee->len) if (!ee->len)
......
...@@ -330,9 +330,37 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) ...@@ -330,9 +330,37 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return retval; return retval;
} }
static enum hclge_cmd_status hclge_cmd_query_firmware_version( static void hclge_set_default_capability(struct hclge_dev *hdev)
struct hclge_hw *hw, u32 *version)
{ {
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}
static void hclge_parse_capability(struct hclge_dev *hdev,
struct hclge_query_version_cmd *cmd)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
u32 caps;
caps = __le32_to_cpu(cmd->caps[0]);
if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
}
static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_query_version_cmd *resp; struct hclge_query_version_cmd *resp;
struct hclge_desc desc; struct hclge_desc desc;
int ret; int ret;
...@@ -340,9 +368,20 @@ static enum hclge_cmd_status hclge_cmd_query_firmware_version( ...@@ -340,9 +368,20 @@ static enum hclge_cmd_status hclge_cmd_query_firmware_version(
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_query_version_cmd *)desc.data; resp = (struct hclge_query_version_cmd *)desc.data;
ret = hclge_cmd_send(hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (!ret) if (ret)
*version = le32_to_cpu(resp->firmware); return ret;
hdev->fw_version = le32_to_cpu(resp->firmware);
ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
HNAE3_PCI_REVISION_BIT_SIZE;
ae_dev->dev_version |= hdev->pdev->revision;
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
hclge_set_default_capability(hdev);
hclge_parse_capability(hdev, resp);
return ret; return ret;
} }
...@@ -402,7 +441,6 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev) ...@@ -402,7 +441,6 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
int hclge_cmd_init(struct hclge_dev *hdev) int hclge_cmd_init(struct hclge_dev *hdev)
{ {
u32 version;
int ret; int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock_bh(&hdev->hw.cmq.csq.lock);
...@@ -431,22 +469,23 @@ int hclge_cmd_init(struct hclge_dev *hdev) ...@@ -431,22 +469,23 @@ int hclge_cmd_init(struct hclge_dev *hdev)
goto err_cmd_init; goto err_cmd_init;
} }
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); /* get version and device capabilities */
ret = hclge_cmd_query_version_and_capability(hdev);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"firmware version query failed %d\n", ret); "failed to query version and capabilities, ret = %d\n",
ret);
goto err_cmd_init; goto err_cmd_init;
} }
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
HNAE3_FW_VERSION_BYTE3_SHIFT), HNAE3_FW_VERSION_BYTE3_SHIFT),
hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
HNAE3_FW_VERSION_BYTE2_SHIFT), HNAE3_FW_VERSION_BYTE2_SHIFT),
hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
HNAE3_FW_VERSION_BYTE1_SHIFT), HNAE3_FW_VERSION_BYTE1_SHIFT),
hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT)); HNAE3_FW_VERSION_BYTE0_SHIFT));
/* ask the firmware to enable some features, driver can work without /* ask the firmware to enable some features, driver can work without
......
@@ -115,7 +115,8 @@ enum hclge_opcode_type {
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
@@ -362,9 +363,26 @@ struct hclge_rx_priv_buff_cmd {
u8 rsv[6];
};
enum HCLGE_CAP_BITS {
HCLGE_CAP_UDP_GSO_B,
HCLGE_CAP_QB_B,
HCLGE_CAP_FD_FORWARD_TC_B,
HCLGE_CAP_PTP_B,
HCLGE_CAP_INT_QL_B,
HCLGE_CAP_SIMPLE_BD_B,
HCLGE_CAP_TX_PUSH_B,
HCLGE_CAP_PHY_IMP_B,
HCLGE_CAP_TQP_TXRX_INDEP_B,
HCLGE_CAP_HW_PAD_B,
HCLGE_CAP_STASH_B,
};
#define HCLGE_QUERY_CAP_LENGTH 3
struct hclge_query_version_cmd {
__le32 firmware;
__le32 firmware_rsv[5];
__le32 hardware;
__le32 rsv;
__le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
};
#define HCLGE_RX_PRIV_EN_B 15
@@ -1071,6 +1089,20 @@ struct hclge_sfp_info_bd0_cmd {
u8 data[HCLGE_SFP_INFO_BD0_LEN];
};
#define HCLGE_QUERY_DEV_SPECS_BD_NUM 4
struct hclge_dev_specs_0_cmd {
__le32 rsv0;
__le32 mac_entry_num;
__le32 mng_entry_num;
__le16 rss_ind_tbl_size;
__le16 rss_key_size;
__le16 int_ql_max;
u8 max_non_tso_bd_num;
u8 rsv1;
__le32 max_tm_rate;
};
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
...
...@@ -729,7 +729,7 @@ static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en) ...@@ -729,7 +729,7 @@ static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
struct hclge_desc desc; struct hclge_desc desc;
int ret; int ret;
if (hdev->pdev->revision < 0x21) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return 0; return 0;
/* configure NCSI error interrupts */ /* configure NCSI error interrupts */
...@@ -808,7 +808,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, ...@@ -808,7 +808,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK); cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
desc[1].data[1] = desc[1].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK); cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
if (hdev->pdev->revision >= 0x21) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
desc[1].data[2] = desc[1].data[2] =
cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK); cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
...@@ -1041,7 +1041,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en) ...@@ -1041,7 +1041,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
if (en) { if (en) {
if (hdev->pdev->revision >= 0x21) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
desc[0].data[0] = desc[0].data[0] =
cpu_to_le32(HCLGE_SSU_COMMON_INT_EN); cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
else else
...@@ -1550,7 +1550,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) ...@@ -1550,7 +1550,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
struct hclge_desc desc; struct hclge_desc desc;
int ret; int ret;
if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev)) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!hnae3_dev_roce_supported(hdev))
return 0; return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
...@@ -1576,8 +1577,7 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) ...@@ -1576,8 +1577,7 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv; struct hclge_dev *hdev = ae_dev->priv;
enum hnae3_reset_type reset_type; enum hnae3_reset_type reset_type;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
hdev->pdev->revision < 0x21)
return; return;
reset_type = hclge_log_and_clear_rocee_ras_error(hdev); reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
...@@ -1663,7 +1663,7 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) ...@@ -1663,7 +1663,7 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
} }
/* Handling Non-fatal Rocee RAS errors */ /* Handling Non-fatal Rocee RAS errors */
if (hdev->pdev->revision >= 0x21 && if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
dev_err(dev, "ROCEE Non-Fatal RAS error identified\n"); dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev); hclge_handle_rocee_ras_error(ae_dev);
......
...@@ -740,7 +740,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) ...@@ -740,7 +740,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
if (stringset == ETH_SS_TEST) { if (stringset == ETH_SS_TEST) {
/* clear loopback bit flags at first */ /* clear loopback bit flags at first */
handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
if (hdev->pdev->revision >= 0x21 || if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
...@@ -1157,7 +1157,7 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, ...@@ -1157,7 +1157,7 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
hclge_convert_setting_sr(mac, speed_ability); hclge_convert_setting_sr(mac, speed_ability);
hclge_convert_setting_lr(mac, speed_ability); hclge_convert_setting_lr(mac, speed_ability);
hclge_convert_setting_cr(mac, speed_ability); hclge_convert_setting_cr(mac, speed_ability);
if (hdev->pdev->revision >= 0x21) if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac); hclge_convert_setting_fec(mac);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
...@@ -1171,7 +1171,7 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, ...@@ -1171,7 +1171,7 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
struct hclge_mac *mac = &hdev->hw.mac; struct hclge_mac *mac = &hdev->hw.mac;
hclge_convert_setting_kr(mac, speed_ability); hclge_convert_setting_kr(mac, speed_ability);
if (hdev->pdev->revision >= 0x21) if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac); hclge_convert_setting_fec(mac);
linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
...@@ -1356,6 +1356,78 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) ...@@ -1356,6 +1356,78 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
return 0; return 0;
} }
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM 8U
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
struct hclge_desc *desc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_dev_specs_0_cmd *req0;
req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
if (!dev_specs->max_non_tso_bd_num)
dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
int ret;
int i;
/* set default specifications as devices lower than version V3 do not
* support querying specifications from firmware.
*/
if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
hclge_set_default_dev_specs(hdev);
return 0;
}
for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
true);
desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
}
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
if (ret)
return ret;
hclge_parse_dev_specs(hdev, desc);
hclge_check_dev_specs(hdev);
return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev) static int hclge_get_cap(struct hclge_dev *hdev)
{ {
int ret; int ret;
...@@ -2892,7 +2964,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev) ...@@ -2892,7 +2964,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (!hdev->support_sfp_query) if (!hdev->support_sfp_query)
return 0; return 0;
if (hdev->pdev->revision >= 0x21) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
ret = hclge_get_sfp_info(hdev, mac); ret = hclge_get_sfp_info(hdev, mac);
else else
ret = hclge_get_sfp_speed(hdev, &speed); ret = hclge_get_sfp_speed(hdev, &speed);
...@@ -2904,7 +2976,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev) ...@@ -2904,7 +2976,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
return ret; return ret;
} }
if (hdev->pdev->revision >= 0x21) { if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) { if (mac->speed_type == QUERY_ACTIVE_SPEED) {
hclge_update_port_capability(mac); hclge_update_port_capability(mac);
return 0; return 0;
...@@ -3569,7 +3641,7 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) ...@@ -3569,7 +3641,7 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
/* For revision 0x20, the reset interrupt source /* For revision 0x20, the reset interrupt source
* can only be cleared after hardware reset done * can only be cleared after hardware reset done
*/ */
if (hdev->pdev->revision == 0x20) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
clearval); clearval);
...@@ -4576,7 +4648,7 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev) ...@@ -4576,7 +4648,7 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport; struct hclge_vport *vport = hdev->vport;
if (hdev->pdev->revision >= 0x21) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
...@@ -4776,13 +4848,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, ...@@ -4776,13 +4848,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc) bool en_mc_pmc)
{ {
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
bool en_bc_pmc = true; bool en_bc_pmc = true;
/* For revision 0x20, if broadcast promisc enabled, vlan filter is /* For device whose version below V2, if broadcast promisc enabled,
* always bypassed. So broadcast promisc should be disabled until * vlan filter is always bypassed. So broadcast promisc should be
* user enable promisc mode * disabled until user enable promisc mode
*/ */
if (handle->pdev->revision == 0x20) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
...@@ -6797,7 +6870,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle, ...@@ -6797,7 +6870,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
* the same, the packets are looped back in the SSU. If SSU loopback * the same, the packets are looped back in the SSU. If SSU loopback
* is disabled, packets can reach MAC even if SMAC is the same as DMAC. * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
*/ */
if (hdev->pdev->revision >= 0x21) { if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
...@@ -8299,7 +8372,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) ...@@ -8299,7 +8372,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
if (hdev->pdev->revision >= 0x21) { if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS, enable, 0); HCLGE_FILTER_FE_EGRESS, enable, 0);
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
...@@ -8659,7 +8732,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) ...@@ -8659,7 +8732,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
int ret; int ret;
int i; int i;
if (hdev->pdev->revision >= 0x21) { if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* for revision 0x21, vf vlan filter is per function */ /* for revision 0x21, vf vlan filter is per function */
for (i = 0; i < hdev->num_alloc_vport; i++) { for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i]; vport = &hdev->vport[i];
...@@ -9014,7 +9087,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, ...@@ -9014,7 +9087,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
u16 state; u16 state;
int ret; int ret;
if (hdev->pdev->revision == 0x20) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP; return -EOPNOTSUPP;
vport = hclge_get_vf_vport(hdev, vfid); vport = hclge_get_vf_vport(hdev, vfid);
...@@ -9989,6 +10062,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -9989,6 +10062,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret) if (ret)
goto err_cmd_uninit; goto err_cmd_uninit;
ret = hclge_query_dev_specs(hdev);
if (ret) {
dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
ret);
goto err_cmd_uninit;
}
ret = hclge_configure(hdev); ret = hclge_configure(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
...@@ -10186,7 +10266,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, ...@@ -10186,7 +10266,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
u32 new_spoofchk = enable ? 1 : 0; u32 new_spoofchk = enable ? 1 : 0;
int ret; int ret;
if (hdev->pdev->revision == 0x20) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP; return -EOPNOTSUPP;
vport = hclge_get_vf_vport(hdev, vf); vport = hclge_get_vf_vport(hdev, vf);
...@@ -10219,7 +10299,7 @@ static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) ...@@ -10219,7 +10299,7 @@ static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
int ret; int ret;
int i; int i;
if (hdev->pdev->revision == 0x20) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return 0; return 0;
/* resume the vf spoof check state after reset */ /* resume the vf spoof check state after reset */
...@@ -10239,6 +10319,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) ...@@ -10239,6 +10319,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{ {
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
u32 new_trusted = enable ? 1 : 0; u32 new_trusted = enable ? 1 : 0;
bool en_bc_pmc; bool en_bc_pmc;
int ret; int ret;
...@@ -10252,7 +10333,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) ...@@ -10252,7 +10333,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
/* Disable promisc mode for VF if it is not trusted any more. */ /* Disable promisc mode for VF if it is not trusted any more. */
if (!enable && vport->vf_info.promisc_enable) { if (!enable && vport->vf_info.promisc_enable) {
en_bc_pmc = hdev->pdev->revision != 0x20; en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
ret = hclge_set_vport_promisc_mode(vport, false, false, ret = hclge_set_vport_promisc_mode(vport, false, false,
en_bc_pmc); en_bc_pmc);
if (ret) if (ret)
......
@@ -23,14 +23,11 @@ enum hclge_shaper_level {
#define HCLGE_SHAPER_BS_U_DEF 5
#define HCLGE_SHAPER_BS_S_DEF 20
#define HCLGE_ETHER_MAX_RATE 100000
/* hclge_shaper_para_calc: calculate ir parameter for the shaper
* @ir: Rate to be config, its unit is Mbps
* @shaper_level: the shaper level. eg: port, pg, priority, queueset
* @ir_b: IR_B parameter of IR shaper
* @ir_u: IR_U parameter of IR shaper
* @ir_s: IR_S parameter of IR shaper
* @ir_para: parameters of IR shaper
* @max_tm_rate: max tm rate is available to config
*
* the formula:
*
@@ -41,7 +38,8 @@ enum hclge_shaper_level {
* @return: 0: calculation successful, negative: fail
*/ */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
u8 *ir_b, u8 *ir_u, u8 *ir_s) struct hclge_shaper_ir_para *ir_para,
u32 max_tm_rate)
{ {
#define DIVISOR_CLK (1000 * 8) #define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK) #define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
...@@ -59,7 +57,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -59,7 +57,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Calc tick */ /* Calc tick */
if (shaper_level >= HCLGE_SHAPER_LVL_CNT || if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
ir > HCLGE_ETHER_MAX_RATE) ir > max_tm_rate)
return -EINVAL; return -EINVAL;
tick = tick_array[shaper_level]; tick = tick_array[shaper_level];
...@@ -74,9 +72,9 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -74,9 +72,9 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick; ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
if (ir_calc == ir) { if (ir_calc == ir) {
*ir_b = 126; ir_para->ir_b = 126;
*ir_u = 0; ir_para->ir_u = 0;
*ir_s = 0; ir_para->ir_s = 0;
return 0; return 0;
} else if (ir_calc > ir) { } else if (ir_calc > ir) {
...@@ -86,8 +84,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -86,8 +84,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc)); ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
} }
*ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) / ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
DIVISOR_CLK; (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else { } else {
/* Increasing the numerator to select ir_u value */ /* Increasing the numerator to select ir_u value */
u32 numerator; u32 numerator;
...@@ -99,15 +97,16 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, ...@@ -99,15 +97,16 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
} }
if (ir_calc == ir) { if (ir_calc == ir) {
*ir_b = 126; ir_para->ir_b = 126;
} else { } else {
u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc); u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
*ir_b = (ir * tick + (denominator >> 1)) / denominator; ir_para->ir_b = (ir * tick + (denominator >> 1)) /
denominator;
} }
} }
*ir_u = ir_u_calc; ir_para->ir_u = ir_u_calc;
*ir_s = ir_s_calc; ir_para->ir_s = ir_s_calc;
return 0; return 0;
} }
...@@ -400,21 +399,22 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, ...@@ -400,21 +399,22 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{ {
struct hclge_port_shapping_cmd *shap_cfg_cmd; struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_shaper_ir_para ir_para;
struct hclge_desc desc; struct hclge_desc desc;
u8 ir_u, ir_b, ir_s;
u32 shapping_para; u32 shapping_para;
int ret; int ret;
ret = hclge_shaper_para_calc(hdev->hw.mac.speed, ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
HCLGE_SHAPER_LVL_PORT, &ir_para,
&ir_b, &ir_u, &ir_s); hdev->ae_dev->dev_specs.max_tm_rate);
if (ret) if (ret)
return ret; return ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
...@@ -515,21 +515,23 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) ...@@ -515,21 +515,23 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{ {
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_qs_shapping_cmd *shap_cfg_cmd; struct hclge_qs_shapping_cmd *shap_cfg_cmd;
struct hclge_shaper_ir_para ir_para;
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
struct hclge_desc desc; struct hclge_desc desc;
u8 ir_b, ir_u, ir_s;
u32 shaper_para; u32 shaper_para;
int ret, i; int ret, i;
if (!max_tx_rate) if (!max_tx_rate)
max_tx_rate = HCLGE_ETHER_MAX_RATE; max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET, ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
&ir_b, &ir_u, &ir_s); &ir_para,
hdev->ae_dev->dev_specs.max_tm_rate);
if (ret) if (ret)
return ret; return ret;
shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
...@@ -668,7 +670,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev) ...@@ -668,7 +670,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].pg_id = i; hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE; hdev->tm_info.pg_info[i].bw_limit =
hdev->ae_dev->dev_specs.max_tm_rate;
if (i != 0) if (i != 0)
continue; continue;
...@@ -729,7 +732,8 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) ...@@ -729,7 +732,8 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{ {
u8 ir_u, ir_b, ir_s; u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
struct hclge_shaper_ir_para ir_para;
u32 shaper_para; u32 shaper_para;
int ret; int ret;
u32 i; u32 i;
...@@ -741,10 +745,9 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) ...@@ -741,10 +745,9 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
/* Pg to pri */ /* Pg to pri */
for (i = 0; i < hdev->tm_info.num_pg; i++) { for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Calc shaper para */ /* Calc shaper para */
ret = hclge_shaper_para_calc( ret = hclge_shaper_para_calc(hdev->tm_info.pg_info[i].bw_limit,
hdev->tm_info.pg_info[i].bw_limit,
HCLGE_SHAPER_LVL_PG, HCLGE_SHAPER_LVL_PG,
&ir_b, &ir_u, &ir_s); &ir_para, max_tm_rate);
if (ret) if (ret)
return ret; return ret;
...@@ -757,7 +760,9 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) ...@@ -757,7 +760,9 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
if (ret) if (ret)
return ret; return ret;
shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
ir_para.ir_u,
ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev, ret = hclge_tm_pg_shapping_cfg(hdev,
...@@ -861,16 +866,16 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) ...@@ -861,16 +866,16 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{ {
u8 ir_u, ir_b, ir_s; u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
struct hclge_shaper_ir_para ir_para;
u32 shaper_para; u32 shaper_para;
int ret; int ret;
u32 i; u32 i;
for (i = 0; i < hdev->tm_info.num_tc; i++) { for (i = 0; i < hdev->tm_info.num_tc; i++) {
ret = hclge_shaper_para_calc( ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
hdev->tm_info.tc_info[i].bw_limit,
HCLGE_SHAPER_LVL_PRI, HCLGE_SHAPER_LVL_PRI,
&ir_b, &ir_u, &ir_s); &ir_para, max_tm_rate);
if (ret) if (ret)
return ret; return ret;
...@@ -882,7 +887,9 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) ...@@ -882,7 +887,9 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
if (ret) if (ret)
return ret; return ret;
shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
ir_para.ir_u,
ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
...@@ -897,12 +904,13 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) ...@@ -897,12 +904,13 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{ {
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u8 ir_u, ir_b, ir_s; struct hclge_shaper_ir_para ir_para;
u32 shaper_para; u32 shaper_para;
int ret; int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
&ir_b, &ir_u, &ir_s); &ir_para,
hdev->ae_dev->dev_specs.max_tm_rate);
if (ret) if (ret)
return ret; return ret;
...@@ -914,7 +922,8 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) ...@@ -914,7 +922,8 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
if (ret) if (ret)
return ret; return ret;
shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF); HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
...@@ -929,15 +938,15 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) ...@@ -929,15 +938,15 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{ {
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u8 ir_u, ir_b, ir_s; u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
struct hclge_shaper_ir_para ir_para;
u32 i; u32 i;
int ret; int ret;
for (i = 0; i < kinfo->num_tc; i++) { for (i = 0; i < kinfo->num_tc; i++) {
ret = hclge_shaper_para_calc( ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
hdev->tm_info.tc_info[i].bw_limit,
HCLGE_SHAPER_LVL_QSET, HCLGE_SHAPER_LVL_QSET,
&ir_b, &ir_u, &ir_s); &ir_para, max_tm_rate);
if (ret) if (ret)
return ret; return ret;
} }
......
@@ -19,6 +19,8 @@
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
#define HCLGE_ETHER_MAX_RATE 100000
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -139,6 +141,12 @@ struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
struct hclge_shaper_ir_para {
u8 ir_b; /* IR_B parameter of IR shaper */
u8 ir_u; /* IR_U parameter of IR shaper */
u8 ir_s; /* IR_S parameter of IR shaper */
};
#define hclge_tm_set_field(dest, string, val) \
hnae3_set_field((dest), \
(HCLGE_TM_SHAP_##string##_MSK), \
...
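
To recap cleanup #10 in one place, a condensed caller sketch (illustrative; it mirrors hclge_tm_port_shaper_cfg() above rather than adding anything new — the shaper IR parameters travel in struct hclge_shaper_ir_para and the rate cap comes from dev_specs):

static int example_port_shaper_calc(struct hclge_dev *hdev, u32 *shapping_para)
{
	struct hclge_shaper_ir_para ir_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	*shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						    ir_para.ir_s,
						    HCLGE_SHAPER_BS_U_DEF,
						    HCLGE_SHAPER_BS_S_DEF);
	return 0;
}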
...@@ -313,9 +313,34 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) ...@@ -313,9 +313,34 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
return status; return status;
} }
static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw, static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
u32 *version)
{ {
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}
static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
struct hclgevf_query_version_cmd *cmd)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
u32 caps;
caps = __le32_to_cpu(cmd->caps[0]);
if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
}
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_query_version_cmd *resp; struct hclgevf_query_version_cmd *resp;
struct hclgevf_desc desc; struct hclgevf_desc desc;
int status; int status;
...@@ -323,9 +348,20 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw, ...@@ -323,9 +348,20 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
resp = (struct hclgevf_query_version_cmd *)desc.data; resp = (struct hclgevf_query_version_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1); hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
status = hclgevf_cmd_send(hw, &desc, 1); status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (!status) if (status)
*version = le32_to_cpu(resp->firmware); return status;
hdev->fw_version = le32_to_cpu(resp->firmware);
ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
HNAE3_PCI_REVISION_BIT_SIZE;
ae_dev->dev_version |= hdev->pdev->revision;
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
hclgevf_set_default_capability(hdev);
hclgevf_parse_capability(hdev, resp);
return status; return status;
} }
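The composite device version is simply the firmware-reported hardware version shifted above the PCI revision. A worked example with assumed values:

	/* Assumed values: firmware reports no hardware version (0) and the
	 * PCI config space shows revision 0x21.
	 */
	u32 hw_version = 0;		/* le32_to_cpu(resp->hardware) */
	u8 pci_revision = 0x21;		/* hdev->pdev->revision */
	u32 dev_version;

	dev_version = (hw_version << HNAE3_PCI_REVISION_BIT_SIZE) | pci_revision;
	/* dev_version == 0x21 == HNAE3_DEVICE_VERSION_V2, so
	 * hclgevf_set_default_capability() marks FD/GRO/FEC as supported.
	 */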
...@@ -364,7 +400,6 @@ int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev) ...@@ -364,7 +400,6 @@ int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
int hclgevf_cmd_init(struct hclgevf_dev *hdev) int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{ {
u32 version;
int ret; int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock_bh(&hdev->hw.cmq.csq.lock);
...@@ -395,23 +430,22 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) ...@@ -395,23 +430,22 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
goto err_cmd_init; goto err_cmd_init;
} }
/* get firmware version */ /* get version and device capabilities */
ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version); ret = hclgevf_cmd_query_version_and_capability(hdev);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret); "failed to query version and capabilities, ret = %d\n", ret);
goto err_cmd_init; goto err_cmd_init;
} }
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n", dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
HNAE3_FW_VERSION_BYTE3_SHIFT), HNAE3_FW_VERSION_BYTE3_SHIFT),
hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
HNAE3_FW_VERSION_BYTE2_SHIFT), HNAE3_FW_VERSION_BYTE2_SHIFT),
hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
HNAE3_FW_VERSION_BYTE1_SHIFT), HNAE3_FW_VERSION_BYTE1_SHIFT),
hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT)); HNAE3_FW_VERSION_BYTE0_SHIFT));
return 0; return 0;
......
...@@ -91,6 +91,8 @@ enum hclgevf_opcode_type { ...@@ -91,6 +91,8 @@ enum hclgevf_opcode_type {
/* Generic command */ /* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001, HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024, HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
/* TQP command */ /* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03, HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13, HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
...@@ -141,9 +143,26 @@ struct hclgevf_ctrl_vector_chain { ...@@ -141,9 +143,26 @@ struct hclgevf_ctrl_vector_chain {
u8 resv; u8 resv;
}; };
enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_UDP_GSO_B,
HCLGEVF_CAP_QB_B,
HCLGEVF_CAP_FD_FORWARD_TC_B,
HCLGEVF_CAP_PTP_B,
HCLGEVF_CAP_INT_QL_B,
HCLGEVF_CAP_SIMPLE_BD_B,
HCLGEVF_CAP_TX_PUSH_B,
HCLGEVF_CAP_PHY_IMP_B,
HCLGEVF_CAP_TQP_TXRX_INDEP_B,
HCLGEVF_CAP_HW_PAD_B,
HCLGEVF_CAP_STASH_B,
};
#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd { struct hclgevf_query_version_cmd {
__le32 firmware; __le32 firmware;
__le32 firmware_rsv[5]; __le32 hardware;
__le32 rsv;
__le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
}; };
#define HCLGEVF_MSIX_OFT_ROCEE_S 0 #define HCLGEVF_MSIX_OFT_ROCEE_S 0
...@@ -253,6 +272,19 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { ...@@ -253,6 +272,19 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3 #define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100 #define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
struct hclgevf_dev_specs_0_cmd {
__le32 rsv0;
__le32 mac_entry_num;
__le32 mng_entry_num;
__le16 rss_ind_tbl_size;
__le16 rss_key_size;
__le16 int_ql_max;
u8 max_non_tso_bd_num;
u8 rsv1[5];
};
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{ {
writel(value, base + reg); writel(value, base + reg);
......
...@@ -746,7 +746,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, ...@@ -746,7 +746,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int i, ret; int i, ret;
if (handle->pdev->revision >= 0x21) { if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* Get hash algorithm */ /* Get hash algorithm */
if (hfunc) { if (hfunc) {
switch (rss_cfg->hash_algo) { switch (rss_cfg->hash_algo) {
...@@ -792,7 +792,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, ...@@ -792,7 +792,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i; int ret, i;
if (handle->pdev->revision >= 0x21) { if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* Set the RSS Hash Key if specififed by the user */ /* Set the RSS Hash Key if specififed by the user */
if (key) { if (key) {
switch (hfunc) { switch (hfunc) {
...@@ -864,7 +864,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, ...@@ -864,7 +864,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
u8 tuple_sets; u8 tuple_sets;
int ret; int ret;
if (handle->pdev->revision == 0x20) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (nfc->data & if (nfc->data &
...@@ -942,7 +942,7 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, ...@@ -942,7 +942,7 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 tuple_sets; u8 tuple_sets;
if (handle->pdev->revision == 0x20) if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP; return -EOPNOTSUPP;
nfc->data = 0; nfc->data = 0;
...@@ -1155,10 +1155,9 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, ...@@ -1155,10 +1155,9 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc) bool en_mc_pmc)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct pci_dev *pdev = hdev->pdev;
bool en_bc_pmc; bool en_bc_pmc;
en_bc_pmc = pdev->revision != 0x20; en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
en_bc_pmc); en_bc_pmc);
...@@ -2288,7 +2287,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, ...@@ -2288,7 +2287,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
* register, so we should just write 0 to the bit we are * register, so we should just write 0 to the bit we are
* handling, and keep other bits as cmdq_stat_reg. * handling, and keep other bits as cmdq_stat_reg.
*/ */
if (hdev->pdev->revision >= 0x21) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
else else
*clearval = cmdq_stat_reg & *clearval = cmdq_stat_reg &
...@@ -2431,7 +2430,7 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev) ...@@ -2431,7 +2430,7 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
rss_cfg->rss_size = hdev->nic.kinfo.rss_size; rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
tuple_sets = &rss_cfg->rss_tuple_sets; tuple_sets = &rss_cfg->rss_tuple_sets;
if (hdev->pdev->revision >= 0x21) { if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE); HCLGEVF_RSS_KEY_SIZE);
...@@ -2456,7 +2455,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ...@@ -2456,7 +2455,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret; int ret;
if (hdev->pdev->revision >= 0x21) { if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
rss_cfg->rss_hash_key); rss_cfg->rss_hash_key);
if (ret) if (ret)
...@@ -2940,6 +2939,76 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) ...@@ -2940,6 +2939,76 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0; return 0;
} }
static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
{
#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
ae_dev->dev_specs.max_non_tso_bd_num =
HCLGEVF_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
struct hclgevf_desc *desc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_dev_specs_0_cmd *req0;
req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
}
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
{
struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
if (!dev_specs->max_non_tso_bd_num)
dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
}
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
{
struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
int ret;
int i;
/* set default specifications as devices lower than version V3 do not
* support querying specifications from firmware.
*/
if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
hclgevf_set_default_dev_specs(hdev);
return 0;
}
for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclgevf_cmd_setup_basic_desc(&desc[i],
HCLGEVF_OPC_QUERY_DEV_SPECS, true);
desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
}
hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
true);
ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
if (ret)
return ret;
hclgevf_parse_dev_specs(hdev, desc);
hclgevf_check_dev_specs(hdev);
return 0;
}
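After hclgevf_check_dev_specs() runs, every spec is guaranteed to be non-zero, whether it came from firmware or from the defaults. A hedged sketch of a consumer reading the result (the helper name is illustrative):

	/* Illustrative consumer: the TX path can size its non-TSO BD limit
	 * from the negotiated (or defaulted) device specifications.
	 */
	static u8 hclgevf_example_max_non_tso_bd_num(struct hclgevf_dev *hdev)
	{
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

		return ae_dev->dev_specs.max_non_tso_bd_num;
	}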
static int hclgevf_pci_reset(struct hclgevf_dev *hdev) static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{ {
struct pci_dev *pdev = hdev->pdev; struct pci_dev *pdev = hdev->pdev;
...@@ -3048,6 +3117,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) ...@@ -3048,6 +3117,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret) if (ret)
goto err_cmd_init; goto err_cmd_init;
ret = hclgevf_query_dev_specs(hdev);
if (ret) {
dev_err(&pdev->dev,
"failed to query dev specifications, ret = %d\n", ret);
goto err_cmd_init;
}
ret = hclgevf_init_msi(hdev); ret = hclgevf_init_msi(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
......