Commit f6779e4e authored by David S. Miller

Merge branch 'hns3-a-few-code-improvements'

Peng Li says:

====================
net: hns3: a few code improvements

This patchset removes some redundant code and fixes a few coding-style
issues found during an internal review; no functional changes are
introduced.

---
Change log:
V1 -> V2:
1. Dropped a patch based on David Miller's review comment.
---
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 50d4feb5 e4e87715
@@ -85,10 +85,12 @@ config HNS3
 drivers(like ODP)to register with HNAE devices and their associated
 operations.
+if HNS3
 config HNS3_HCLGE
 tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+default m
 depends on PCI_MSI
-depends on HNS3
 ---help---
 This selects the HNS3_HCLGE network acceleration engine & its hardware
 compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -97,16 +99,15 @@ config HNS3_HCLGE
 config HNS3_DCB
 bool "Hisilicon HNS3 Data Center Bridge Support"
 default n
-depends on HNS3 && HNS3_HCLGE && DCB
+depends on HNS3_HCLGE && DCB
 ---help---
 Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
 If unsure, say N.
 config HNS3_HCLGEVF
 tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
 depends on PCI_MSI
-depends on HNS3
 depends on HNS3_HCLGE
 ---help---
 This selects the HNS3 VF drivers network acceleration engine & its hardware
@@ -115,11 +116,13 @@ config HNS3_HCLGEVF
 config HNS3_ENET
 tristate "Hisilicon HNS3 Ethernet Device Support"
+default m
 depends on 64BIT && PCI
-depends on HNS3
 ---help---
 This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
 family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
 devices and their associated operations.
+endif #HNS3
 endif # NET_VENDOR_HISILICON
@@ -40,13 +40,13 @@ static void hnae3_set_client_init_flag(struct hnae3_client *client,
 {
 switch (client->type) {
 case HNAE3_CLIENT_KNIC:
-hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
 break;
 case HNAE3_CLIENT_UNIC:
-hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
+hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
 break;
 case HNAE3_CLIENT_ROCE:
-hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
 break;
 default:
 break;
@@ -60,16 +60,16 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
 switch (client->type) {
 case HNAE3_CLIENT_KNIC:
-inited = hnae_get_bit(ae_dev->flag,
+inited = hnae3_get_bit(ae_dev->flag,
 HNAE3_KNIC_CLIENT_INITED_B);
 break;
 case HNAE3_CLIENT_UNIC:
-inited = hnae_get_bit(ae_dev->flag,
+inited = hnae3_get_bit(ae_dev->flag,
 HNAE3_UNIC_CLIENT_INITED_B);
 break;
 case HNAE3_CLIENT_ROCE:
-inited = hnae_get_bit(ae_dev->flag,
+inited = hnae3_get_bit(ae_dev->flag,
 HNAE3_ROCE_CLIENT_INITED_B);
 break;
 default:
 break;
@@ -85,7 +85,7 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
 /* check if this client matches the type of ae_dev */
 if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
-hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
 return 0;
 }
@@ -190,7 +190,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
 continue;
 }
-hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 /* check the client list for the match with this ae_dev type and
 * initialize the figure out client instance
@@ -220,7 +220,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
 mutex_lock(&hnae3_common_lock);
 /* Check if there are matched ae_dev */
 list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
-if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
 continue;
 id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -234,7 +234,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
 hnae3_match_n_instantiate(client, ae_dev, false);
 ae_algo->ops->uninit_ae_dev(ae_dev);
-hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
 }
 list_del(&ae_algo->node);
@@ -278,7 +278,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 goto out_err;
 }
-hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
 break;
 }
@@ -310,7 +310,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
 mutex_lock(&hnae3_common_lock);
 /* Check if there are matched ae_algo */
 list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
-if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
 continue;
 id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
@@ -321,7 +321,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
 hnae3_match_n_instantiate(client, ae_dev, false);
 ae_algo->ops->uninit_ae_dev(ae_dev);
-hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
 }
 list_del(&ae_dev->node);
...
@@ -62,10 +62,10 @@
 BIT(HNAE3_DEV_SUPPORT_ROCE_B))
 #define hnae3_dev_roce_supported(hdev) \
-hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
 #define hnae3_dev_dcb_supported(hdev) \
-hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
 #define ring_ptr_move_fw(ring, p) \
 ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
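ring_ptr_move_fw() (unchanged by this hunk) advances a ring index by one with wraparound via modulo desc_num. A tiny self-contained illustration of that arithmetic, using a hypothetical 4-entry ring:

    /* sketch: index wraparound as done by ring_ptr_move_fw(ring, next_to_use) */
    struct ring { int next_to_use; int desc_num; };

    int main(void)
    {
        struct ring r = { .next_to_use = 3, .desc_num = 4 };

        r.next_to_use = (r.next_to_use + 1) % r.desc_num;  /* 3 -> 0: wraps */
        return r.next_to_use;                              /* 0 */
    }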
@@ -167,7 +167,6 @@ struct hnae3_client_ops {
 #define HNAE3_CLIENT_NAME_LENGTH 16
 struct hnae3_client {
 char name[HNAE3_CLIENT_NAME_LENGTH];
-u16 version;
 unsigned long state;
 enum hnae3_client_type type;
 const struct hnae3_client_ops *ops;
@@ -436,7 +435,6 @@ struct hnae3_dcb_ops {
 struct hnae3_ae_algo {
 const struct hnae3_ae_ops *ops;
 struct list_head node;
-char name[HNAE3_CLASS_NAME_SIZE];
 const struct pci_device_id *pdev_id_table;
 };
@@ -509,17 +507,17 @@ struct hnae3_handle {
 u32 numa_node_mask; /* for multi-chip support */
 };
-#define hnae_set_field(origin, mask, shift, val) \
+#define hnae3_set_field(origin, mask, shift, val) \
 do { \
 (origin) &= (~(mask)); \
 (origin) |= ((val) << (shift)) & (mask); \
 } while (0)
-#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
-#define hnae_set_bit(origin, shift, val) \
-hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
-#define hnae_get_bit(origin, shift) \
-hnae_get_field((origin), (0x1 << (shift)), (shift))
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+#define hnae3_set_bit(origin, shift, val) \
+hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+#define hnae3_get_bit(origin, shift) \
+hnae3_get_field((origin), (0x1 << (shift)), (shift))
 void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
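The hunk above only renames the generic bit/field helpers with an hnae3_ prefix; their behaviour is unchanged: set_field clears the masked bits, then ORs in the shifted value, and the bit variants are the single-bit special case. A standalone demonstration using the macro bodies exactly as defined above (the register value and mask 0xf0/shift 4 are illustrative):

    #include <stdio.h>

    #define hnae3_set_field(origin, mask, shift, val) \
        do { \
            (origin) &= (~(mask)); \
            (origin) |= ((val) << (shift)) & (mask); \
        } while (0)
    #define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

    int main(void)
    {
        unsigned int reg = 0;

        hnae3_set_field(reg, 0xf0, 4, 0x9);  /* bits 7:4 <- 9 */
        printf("%#x %u\n", reg, hnae3_get_field(reg, 0xf0, 4));  /* 0x90 9 */
        return 0;
    }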
...
@@ -493,8 +493,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 /* find the txbd field values */
 *paylen = skb->len - hdr_len;
-hnae_set_bit(*type_cs_vlan_tso,
+hnae3_set_bit(*type_cs_vlan_tso,
 HNS3_TXD_TSO_B, 1);
 /* get MSS for TSO */
 *mss = skb_shinfo(skb)->gso_size;
@@ -586,21 +586,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 /* compute L2 header size for normal packet, defined in 2 Bytes */
 l2_len = l3.hdr - skb->data;
-hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 HNS3_TXD_L2LEN_S, l2_len >> 1);
 /* tunnel packet*/
 if (skb->encapsulation) {
 /* compute OL2 header size, defined in 2 Bytes */
 ol2_len = l2_len;
-hnae_set_field(*ol_type_vlan_len_msec,
+hnae3_set_field(*ol_type_vlan_len_msec,
 HNS3_TXD_L2LEN_M,
 HNS3_TXD_L2LEN_S, ol2_len >> 1);
 /* compute OL3 header size, defined in 4 Bytes */
 ol3_len = l4.hdr - l3.hdr;
-hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
+hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
 HNS3_TXD_L3LEN_S, ol3_len >> 2);
 /* MAC in UDP, MAC in GRE (0x6558)*/
 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
@@ -609,16 +609,17 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 /* compute OL4 header size, defined in 4 Bytes. */
 ol4_len = l2_hdr - l4.hdr;
-hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
-HNS3_TXD_L4LEN_S, ol4_len >> 2);
+hnae3_set_field(*ol_type_vlan_len_msec,
+HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
+ol4_len >> 2);
 /* switch IP header ptr from outer to inner header */
 l3.hdr = skb_inner_network_header(skb);
 /* compute inner l2 header size, defined in 2 Bytes. */
 l2_len = l3.hdr - l2_hdr;
-hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
+hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 HNS3_TXD_L2LEN_S, l2_len >> 1);
 } else {
 /* skb packet types not supported by hardware,
 * txbd len fild doesn't be filled.
@@ -634,22 +635,24 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
 l3_len = l4.hdr - l3.hdr;
-hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
+hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
 HNS3_TXD_L3LEN_S, l3_len >> 2);
 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
 switch (l4_proto) {
 case IPPROTO_TCP:
-hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 HNS3_TXD_L4LEN_S, l4.tcp->doff);
 break;
 case IPPROTO_SCTP:
-hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
-HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
+hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+HNS3_TXD_L4LEN_S,
+(sizeof(struct sctphdr) >> 2));
 break;
 case IPPROTO_UDP:
-hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
-HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
+hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
+HNS3_TXD_L4LEN_S,
+(sizeof(struct udphdr) >> 2));
 break;
 default:
 /* skb packet types not supported by hardware,
@@ -703,32 +706,34 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 /* define outer network header type.*/
 if (skb->protocol == htons(ETH_P_IP)) {
 if (skb_is_gso(skb))
-hnae_set_field(*ol_type_vlan_len_msec,
-HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
-HNS3_OL3T_IPV4_CSUM);
+hnae3_set_field(*ol_type_vlan_len_msec,
+HNS3_TXD_OL3T_M,
+HNS3_TXD_OL3T_S,
+HNS3_OL3T_IPV4_CSUM);
 else
-hnae_set_field(*ol_type_vlan_len_msec,
-HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
-HNS3_OL3T_IPV4_NO_CSUM);
+hnae3_set_field(*ol_type_vlan_len_msec,
+HNS3_TXD_OL3T_M,
+HNS3_TXD_OL3T_S,
+HNS3_OL3T_IPV4_NO_CSUM);
 } else if (skb->protocol == htons(ETH_P_IPV6)) {
-hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
+hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
 }
 /* define tunnel type(OL4).*/
 switch (l4_proto) {
 case IPPROTO_UDP:
-hnae_set_field(*ol_type_vlan_len_msec,
+hnae3_set_field(*ol_type_vlan_len_msec,
 HNS3_TXD_TUNTYPE_M,
 HNS3_TXD_TUNTYPE_S,
 HNS3_TUN_MAC_IN_UDP);
 break;
 case IPPROTO_GRE:
-hnae_set_field(*ol_type_vlan_len_msec,
+hnae3_set_field(*ol_type_vlan_len_msec,
 HNS3_TXD_TUNTYPE_M,
 HNS3_TXD_TUNTYPE_S,
 HNS3_TUN_NVGRE);
 break;
 default:
 /* drop the skb tunnel packet if hardware don't support,
@@ -749,43 +754,43 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 }
 if (l3.v4->version == 4) {
-hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
 /* the stack computes the IP header already, the only time we
 * need the hardware to recompute it is in the case of TSO.
 */
 if (skb_is_gso(skb))
-hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
-hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
+hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 } else if (l3.v6->version == 6) {
-hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
+hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
-hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 }
 switch (l4_proto) {
 case IPPROTO_TCP:
-hnae_set_field(*type_cs_vlan_tso,
+hnae3_set_field(*type_cs_vlan_tso,
 HNS3_TXD_L4T_M,
 HNS3_TXD_L4T_S,
 HNS3_L4T_TCP);
 break;
 case IPPROTO_UDP:
 if (hns3_tunnel_csum_bug(skb))
 break;
-hnae_set_field(*type_cs_vlan_tso,
+hnae3_set_field(*type_cs_vlan_tso,
 HNS3_TXD_L4T_M,
 HNS3_TXD_L4T_S,
 HNS3_L4T_UDP);
 break;
 case IPPROTO_SCTP:
-hnae_set_field(*type_cs_vlan_tso,
+hnae3_set_field(*type_cs_vlan_tso,
 HNS3_TXD_L4T_M,
 HNS3_TXD_L4T_S,
 HNS3_L4T_SCTP);
 break;
 default:
 /* drop the skb tunnel packet if hardware don't support,
@@ -807,11 +812,11 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 {
 /* Config bd buffer end */
-hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
 HNS3_TXD_BDTYPE_S, 0);
-hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
-hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
-hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 }
 static int hns3_fill_desc_vtags(struct sk_buff *skb,
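As the comments in hns3_set_l2l3l4_len() above say, the TXD length fields are stored in hardware units: L2LEN in 2-byte units and L3LEN/L4LEN in 4-byte units, hence the >> 1 and >> 2 shifts (TCP's doff is already a count of 32-bit words, so it is passed through). A worked example with illustrative values, not taken from the patch:

    /* sketch: TXD header-length units for an untagged Ethernet+IPv4+TCP frame */
    int main(void)
    {
        unsigned int l2_len = 14;           /* Ethernet header, bytes */
        unsigned int l3_len = 20;           /* IPv4 header, bytes */
        unsigned int l4_words = 5;          /* TCP doff: 5 * 4 = 20 bytes */
        unsigned int txd_l2 = l2_len >> 1;  /* 7: L2LEN is in 2-byte units */
        unsigned int txd_l3 = l3_len >> 2;  /* 5: L3LEN is in 4-byte units */

        return txd_l2 + txd_l3 + l4_words;  /* keep the values "used" */
    }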
@@ -844,10 +849,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
 * and use inner_vtag in one tag case.
 */
 if (skb->protocol == htons(ETH_P_8021Q)) {
-hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
 *out_vtag = vlan_tag;
 } else {
-hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
 *inner_vtag = vlan_tag;
 }
 } else if (skb->protocol == htons(ETH_P_8021Q)) {
@@ -1135,7 +1140,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 wmb(); /* Commit all data before submit */
-hnae_queue_xmit(ring->tqp, buf_num);
+hnae3_queue_xmit(ring->tqp, buf_num);
 return NETDEV_TX_OK;
@@ -1703,7 +1708,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 struct hns3_desc_cb *cb)
 {
-unsigned int order = hnae_page_order(ring);
+unsigned int order = hnae3_page_order(ring);
 struct page *p;
 p = dev_alloc_pages(order);
@@ -1714,7 +1719,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 cb->page_offset = 0;
 cb->reuse_flag = 0;
 cb->buf = page_address(p);
-cb->length = hnae_page_size(ring);
+cb->length = hnae3_page_size(ring);
 cb->type = DESC_TYPE_PAGE;
 return 0;
@@ -1780,33 +1785,27 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
 /* free desc along with its attached buffer */
 static void hns3_free_desc(struct hns3_enet_ring *ring)
 {
+int size = ring->desc_num * sizeof(ring->desc[0]);
 hns3_free_buffers(ring);
-dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
-ring->desc_num * sizeof(ring->desc[0]),
-DMA_BIDIRECTIONAL);
-ring->desc_dma_addr = 0;
-kfree(ring->desc);
-ring->desc = NULL;
+if (ring->desc) {
+dma_free_coherent(ring_to_dev(ring), size,
+ring->desc, ring->desc_dma_addr);
+ring->desc = NULL;
+}
 }
 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 {
 int size = ring->desc_num * sizeof(ring->desc[0]);
-ring->desc = kzalloc(size, GFP_KERNEL);
+ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
+&ring->desc_dma_addr,
+GFP_KERNEL);
 if (!ring->desc)
 return -ENOMEM;
-ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
-size, DMA_BIDIRECTIONAL);
-if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
-ring->desc_dma_addr = 0;
-kfree(ring->desc);
-ring->desc = NULL;
-return -ENOMEM;
-}
 return 0;
 }
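The two hunks above (and the matching hclge command-queue change further down) swap a kzalloc() + dma_map_single() pair for a single coherent allocation, so there is no mapping error to check and no unmap/free ordering to get wrong. A minimal sketch of the new pattern, with my_ring/my_desc as stand-in types for the driver's real ones (dma_zalloc_coherent() was later absorbed into dma_alloc_coherent(), which zeroes memory since Linux 5.0):

    #include <linux/dma-mapping.h>

    struct my_desc { u8 data[32]; };        /* stand-in descriptor layout */
    struct my_ring {
        struct my_desc *desc;
        dma_addr_t desc_dma_addr;
        int desc_num;
    };

    /* sketch: one coherent allocation replaces alloc + map + error unwind */
    static int alloc_desc_ring(struct device *dev, struct my_ring *ring)
    {
        int size = ring->desc_num * sizeof(ring->desc[0]);

        ring->desc = dma_zalloc_coherent(dev, size, &ring->desc_dma_addr,
                                         GFP_KERNEL);
        if (!ring->desc)
            return -ENOMEM;                 /* nothing to unwind */
        return 0;
    }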
@@ -1887,7 +1886,7 @@ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
 (*bytes) += desc_cb->length;
-/* desc_cb will be cleaned, after hnae_free_buffer_detach*/
+/* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
 hns3_free_buffer_detach(ring, ring->next_to_clean);
 ring_ptr_move_fw(ring, next_to_clean);
@@ -2016,15 +2015,15 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 bool twobufs;
 twobufs = ((PAGE_SIZE < 8192) &&
-hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
 desc = &ring->desc[ring->next_to_clean];
 size = le16_to_cpu(desc->rx.size);
-truesize = hnae_buf_size(ring);
+truesize = hnae3_buf_size(ring);
 if (!twobufs)
-last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 size - pull_len, truesize);
@@ -2076,13 +2075,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 return;
 /* check if hardware has done checksum */
-if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
+if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
 return;
-if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
-hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
-hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
-hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
+if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
+hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
+hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
+hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
 netdev_err(netdev, "L3/L4 error pkt\n");
 u64_stats_update_begin(&ring->syncp);
 ring->stats.l3l4_csum_err++;
@@ -2091,12 +2090,13 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 return;
 }
-l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
+l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
 HNS3_RXD_L3ID_S);
-l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
+l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
 HNS3_RXD_L4ID_S);
-ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
+ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+HNS3_RXD_OL4ID_S);
 switch (ol4_type) {
 case HNS3_OL4_TYPE_MAC_IN_UDP:
 case HNS3_OL4_TYPE_NVGRE:
@@ -2135,8 +2135,8 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
 #define HNS3_STRP_OUTER_VLAN 0x1
 #define HNS3_STRP_INNER_VLAN 0x2
-switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
 HNS3_RXD_STRP_TAGP_S)) {
 case HNS3_STRP_OUTER_VLAN:
 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
 break;
@@ -2174,7 +2174,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
 /* Check valid BD */
-if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
+if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
 return -EFAULT;
 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
@@ -2229,7 +2229,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
 ring_ptr_move_fw(ring, next_to_clean);
-while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
 desc = &ring->desc[ring->next_to_clean];
 desc_cb = &ring->desc_cb[ring->next_to_clean];
 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
@@ -2257,7 +2257,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 vlan_tag);
 }
-if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
+if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
 ((u64 *)desc)[0], ((u64 *)desc)[1]);
 u64_stats_update_begin(&ring->syncp);
@@ -2269,7 +2269,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 }
 if (unlikely((!desc->rx.pkt_len) ||
-hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
+hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
 netdev_err(netdev, "truncated pkt\n");
 u64_stats_update_begin(&ring->syncp);
 ring->stats.err_pkt_len++;
@@ -2279,7 +2279,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 return -EFAULT;
 }
-if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
+if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
 netdev_err(netdev, "L2 error pkt\n");
 u64_stats_update_begin(&ring->syncp);
 ring->stats.l2_err++;
@@ -2532,10 +2532,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 tx_ring = tqp_vector->tx_group.ring;
 if (tx_ring) {
 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
-hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
 HNAE3_RING_TYPE_TX);
-hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
 cur_chain->next = NULL;
@@ -2549,12 +2549,12 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 cur_chain->next = chain;
 chain->tqp_index = tx_ring->tqp->tqp_index;
-hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
 HNAE3_RING_TYPE_TX);
-hnae_set_field(chain->int_gl_idx,
+hnae3_set_field(chain->int_gl_idx,
 HNAE3_RING_GL_IDX_M,
 HNAE3_RING_GL_IDX_S,
 HNAE3_RING_GL_TX);
 cur_chain = chain;
 }
@@ -2564,10 +2564,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 if (!tx_ring && rx_ring) {
 cur_chain->next = NULL;
 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
-hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
 HNAE3_RING_TYPE_RX);
-hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
 rx_ring = rx_ring->next;
 }
@@ -2579,10 +2579,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 cur_chain->next = chain;
 chain->tqp_index = rx_ring->tqp->tqp_index;
-hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
 HNAE3_RING_TYPE_RX);
-hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
 cur_chain = chain;
@@ -2805,7 +2805,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
 ring->io_base = q->io_base;
 }
-hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
 ring->tqp = q;
 ring->desc = NULL;
...
@@ -499,7 +499,6 @@ struct hns3_enet_tqp_vector {
 u16 num_tqps; /* total number of tqps in TQP vector */
-cpumask_t affinity_mask;
 char name[HNAE3_INT_NAME_LEN];
 /* when 0 should adjust interrupt coalesce parameter */
@@ -591,7 +590,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 #define hns3_write_dev(a, reg, value) \
 hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
 (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
 #define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
@@ -601,9 +600,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 #define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae_buf_size(_ring) ((_ring)->buf_size)
-#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
-#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
+#define hnae3_buf_size(_ring) ((_ring)->buf_size)
+#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
+#define hnae3_page_size(_ring) (PAGE_SIZE << hnae3_page_order(_ring))
 /* iterator for handling rings in ring group */
 #define hns3_for_each_ring(pos, head) \
...
@@ -18,8 +18,7 @@
 #include "hclge_main.h"
 #define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
-#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
-DMA_TO_DEVICE : DMA_FROM_DEVICE)
 #define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
 static int hclge_ring_space(struct hclge_cmq_ring *ring)
@@ -46,31 +45,24 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
 {
 int size = ring->desc_num * sizeof(struct hclge_desc);
-ring->desc = kzalloc(size, GFP_KERNEL);
+ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
+size, &ring->desc_dma_addr,
+GFP_KERNEL);
 if (!ring->desc)
 return -ENOMEM;
-ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
-size, DMA_BIDIRECTIONAL);
-if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
-ring->desc_dma_addr = 0;
-kfree(ring->desc);
-ring->desc = NULL;
-return -ENOMEM;
-}
 return 0;
 }
 static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
 {
-dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
-ring->desc_num * sizeof(ring->desc[0]),
-DMA_BIDIRECTIONAL);
-ring->desc_dma_addr = 0;
-kfree(ring->desc);
-ring->desc = NULL;
+int size = ring->desc_num * sizeof(struct hclge_desc);
+if (ring->desc) {
+dma_free_coherent(cmq_ring_to_dev(ring), size,
+ring->desc, ring->desc_dma_addr);
+ring->desc = NULL;
+}
 }
 static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
@@ -111,8 +103,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
 if (is_read)
 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
-else
-desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
 }
 static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
@@ -154,31 +144,20 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw)
 {
 struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
 struct hclge_cmq_ring *csq = &hw->cmq.csq;
-u16 ntc = csq->next_to_clean;
-struct hclge_desc *desc;
-int clean = 0;
 u32 head;
+int clean;
-desc = &csq->desc[ntc];
 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
 rmb(); /* Make sure head is ready before touch any data */
 if (!is_valid_csq_clean_head(csq, head)) {
-dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head,
+dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
 csq->next_to_use, csq->next_to_clean);
 return 0;
 }
-while (head != ntc) {
-memset(desc, 0, sizeof(*desc));
-ntc++;
-if (ntc == csq->desc_num)
-ntc = 0;
-desc = &csq->desc[ntc];
-clean++;
-}
-csq->next_to_clean = ntc;
+clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+csq->next_to_clean = head;
 return clean;
 }
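The rewritten hclge_cmd_csq_clean() replaces the per-descriptor zeroing loop with a single modular subtraction: the clean count is the forward distance from next_to_clean to the hardware head on the ring. A self-contained check of the expression with illustrative values:

    /* sketch: clean count on a 1024-entry ring when head has wrapped */
    int main(void)
    {
        int desc_num = 1024;
        int next_to_clean = 1020;
        int head = 4;   /* hardware advanced past the end of the ring */
        int clean = (head - next_to_clean + desc_num) % desc_num;

        return clean;   /* 8: slots 1020..1023 and 0..3 */
    }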
@@ -256,33 +235,34 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 */
 if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
 do {
-if (hclge_cmd_csq_done(hw))
+if (hclge_cmd_csq_done(hw)) {
+complete = true;
 break;
+}
 udelay(1);
 timeout++;
 } while (timeout < hw->cmq.tx_timeout);
 }
-if (hclge_cmd_csq_done(hw)) {
-complete = true;
+if (!complete) {
+retval = -EAGAIN;
+} else {
 handle = 0;
 while (handle < num) {
 /* Get the result of hardware write back */
 desc_to_use = &hw->cmq.csq.desc[ntc];
 desc[handle] = *desc_to_use;
-pr_debug("Get cmd desc:\n");
 if (likely(!hclge_is_special_opcode(opcode)))
 desc_ret = le16_to_cpu(desc[handle].retval);
 else
 desc_ret = le16_to_cpu(desc[0].retval);
-if ((enum hclge_cmd_return_status)desc_ret ==
-HCLGE_CMD_EXEC_SUCCESS)
+if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
 retval = 0;
 else
 retval = -EIO;
-hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
+hw->cmq.last_status = desc_ret;
 ntc++;
 handle++;
 if (ntc == hw->cmq.csq.desc_num)
@@ -290,9 +270,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 }
 }
-if (!complete)
-retval = -EAGAIN;
 /* Clean the command send queue */
 handle = hclge_cmd_csq_clean(hw);
 if (handle != num) {
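After this change the completion flag is latched the moment the CSQ reports done, and the -EAGAIN decision is made in one place instead of re-reading hardware state after the loop. A reduced skeleton of that control flow, with hw_done() and udelay() stubbed in place of the real hclge helpers:

    #include <errno.h>
    #include <stdbool.h>

    static bool hw_done(void) { return true; }  /* stub for hclge_cmd_csq_done() */
    static void udelay(int us) { (void)us; }    /* stub for the kernel delay */

    /* sketch: bounded busy-wait, then branch once on the recorded result */
    static int send_sync(int tx_timeout)
    {
        bool complete = false;
        int timeout = 0;

        do {
            if (hw_done()) {
                complete = true;
                break;
            }
            udelay(1);
            timeout++;
        } while (timeout < tx_timeout);

        return complete ? 0 : -EAGAIN;  /* write-back handling would go here */
    }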
...
@@ -939,8 +939,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 if (hnae3_dev_roce_supported(hdev)) {
 hdev->num_roce_msi =
-hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 /* PF should have NIC vectors and Roce vectors,
 * NIC vectors are queued before Roce vectors.
@@ -948,8 +948,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
 } else {
 hdev->num_msi =
-hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 }
 return 0;
@@ -1038,38 +1038,38 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 req = (struct hclge_cfg_param_cmd *)desc[0].data;
 /* get the configuration */
-cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
+cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
 HCLGE_CFG_VMDQ_M,
 HCLGE_CFG_VMDQ_S);
-cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
+cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
-cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
+cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
 HCLGE_CFG_TQP_DESC_N_M,
 HCLGE_CFG_TQP_DESC_N_S);
-cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
+cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
 HCLGE_CFG_PHY_ADDR_M,
 HCLGE_CFG_PHY_ADDR_S);
-cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
+cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
 HCLGE_CFG_MEDIA_TP_M,
 HCLGE_CFG_MEDIA_TP_S);
-cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
+cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
 HCLGE_CFG_RX_BUF_LEN_M,
 HCLGE_CFG_RX_BUF_LEN_S);
 /* get mac_address */
 mac_addr_tmp = __le32_to_cpu(req->param[2]);
-mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
+mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
 HCLGE_CFG_MAC_ADDR_H_M,
 HCLGE_CFG_MAC_ADDR_H_S);
 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
-cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
+cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
 HCLGE_CFG_DEFAULT_SPEED_M,
 HCLGE_CFG_DEFAULT_SPEED_S);
-cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
 HCLGE_CFG_RSS_SIZE_M,
 HCLGE_CFG_RSS_SIZE_S);
 for (i = 0; i < ETH_ALEN; i++)
 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
@@ -1077,9 +1077,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 req = (struct hclge_cfg_param_cmd *)desc[1].data;
 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
-cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
+cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
 HCLGE_CFG_SPEED_ABILITY_M,
 HCLGE_CFG_SPEED_ABILITY_S);
 }
 /* hclge_get_cfg: query the static parameter from flash
@@ -1098,11 +1098,11 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
 req = (struct hclge_cfg_param_cmd *)desc[i].data;
 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
 true);
-hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
+hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
 /* Len should be united by 4 bytes when send to hardware */
-hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
 req->offset = cpu_to_le32(offset);
 }
@@ -1189,7 +1189,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 /* Currently not support uncontiuous tc */
 for (i = 0; i < hdev->tm_info.num_tc; i++)
-hnae_set_bit(hdev->hw_tc_map, i, 1);
+hnae3_set_bit(hdev->hw_tc_map, i, 1);
 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
@@ -1208,13 +1208,13 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
 tso_mss = 0;
-hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
 req->tso_mss_min = cpu_to_le16(tso_mss);
 tso_mss = 0;
-hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
+hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
 req->tso_mss_max = cpu_to_le16(tso_mss);
 return hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2118,48 +2118,48 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
-hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
+hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
 switch (speed) {
 case HCLGE_MAC_SPEED_10M:
-hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 HCLGE_CFG_SPEED_S, 6);
 break;
 case HCLGE_MAC_SPEED_100M:
-hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 HCLGE_CFG_SPEED_S, 7);
 break;
 case HCLGE_MAC_SPEED_1G:
-hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 HCLGE_CFG_SPEED_S, 0);
 break;
 case HCLGE_MAC_SPEED_10G:
-hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 HCLGE_CFG_SPEED_S, 1);
 break;
 case HCLGE_MAC_SPEED_25G:
-hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 HCLGE_CFG_SPEED_S, 2);
 break;
 case HCLGE_MAC_SPEED_40G:
-hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 HCLGE_CFG_SPEED_S, 3);
 break;
 case HCLGE_MAC_SPEED_50G:
-hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 HCLGE_CFG_SPEED_S, 4);
 break;
 case HCLGE_MAC_SPEED_100G:
-hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
 HCLGE_CFG_SPEED_S, 5);
 break;
 default:
 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
 return -EINVAL;
 }
-hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
 1);
 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 if (ret) {
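The speed codes written via HCLGE_CFG_SPEED_M are small firmware constants (1G->0, 10G->1, 25G->2, 40G->3, 50G->4, 100G->5, 10M->6, 100M->7). The switch keeps that mapping explicit; a table-driven lookup would be an equivalent, purely hypothetical refactor (the numeric speeds below are illustrative, the driver uses HCLGE_MAC_SPEED_* enums):

    #include <errno.h>
    #include <stddef.h>

    /* sketch: table-driven speed -> firmware register code lookup */
    static const struct { int speed; int code; } speed_map[] = {
        { 10, 6 }, { 100, 7 }, { 1000, 0 }, { 10000, 1 },
        { 25000, 2 }, { 40000, 3 }, { 50000, 4 }, { 100000, 5 },
    };

    static int speed_to_fw_code(int speed)
    {
        size_t i;

        for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
            if (speed_map[i].speed == speed)
                return speed_map[i].code;
        return -EINVAL;     /* mirrors the switch's default arm */
    }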
@@ -2201,9 +2201,9 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
 return ret;
 }
-*duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
-speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
+*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
+speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
 HCLGE_QUERY_SPEED_S);
 ret = hclge_parse_speed(speed_tmp, speed);
 if (ret) {
@@ -2225,7 +2225,7 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
 req = (struct hclge_config_auto_neg_cmd *)desc.data;
-hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
 req->cfg_an_cmd_flag = cpu_to_le32(flag);
 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2269,8 +2269,8 @@ static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
-hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
 mask_vlan ? 1 : 0);
 ether_addr_copy(req->mac_mask, mac_mask);
 status = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2711,7 +2711,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 }
 val = hclge_read_dev(&hdev->hw, reg);
-while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
 msleep(HCLGE_RESET_WATI_MS);
 val = hclge_read_dev(&hdev->hw, reg);
 cnt++;
@@ -2733,8 +2733,7 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 int ret;
 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
-hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
-hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
 req->fun_reset_vfid = func_id;
 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2753,13 +2752,13 @@ static void hclge_do_reset(struct hclge_dev *hdev)
 switch (hdev->reset_type) {
 case HNAE3_GLOBAL_RESET:
 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
-hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
 dev_info(&pdev->dev, "Global Reset requested\n");
 break;
 case HNAE3_CORE_RESET:
 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
-hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
 dev_info(&pdev->dev, "Core Reset requested\n");
 break;
@@ -3116,11 +3115,11 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 u16 mode = 0;
-hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
-hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
-hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
 req->rss_tc_mode[i] = cpu_to_le16(mode);
 }
@@ -3497,16 +3496,16 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
 i = 0;
 for (node = ring_chain; node; node = node->next) {
 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
-hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
 HCLGE_INT_TYPE_S,
-hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
 HCLGE_TQP_ID_S, node->tqp_index);
-hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
 HCLGE_INT_GL_IDX_S,
-hnae_get_field(node->int_gl_idx,
+hnae3_get_field(node->int_gl_idx,
 HNAE3_RING_GL_IDX_M,
 HNAE3_RING_GL_IDX_S));
 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -3654,20 +3653,20 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
 int ret;
 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
-hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
-hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
-hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
-hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
-hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
-hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
-hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
-hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
-hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
...@@ -3695,7 +3694,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) ...@@ -3695,7 +3694,7 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en)
/* 2 Then setup the loopback flag */ /* 2 Then setup the loopback flag */
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
...@@ -3959,10 +3958,10 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, ...@@ -3959,10 +3958,10 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
req = (struct hclge_mta_filter_mode_cmd *)desc.data; req = (struct hclge_mta_filter_mode_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
enable); enable);
hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M,
HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel);
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) { if (ret) {
...@@ -3986,8 +3985,8 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, ...@@ -3986,8 +3985,8 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
enable); enable);
req->function_id = func_id; req->function_id = func_id;
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
...@@ -4013,10 +4012,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, ...@@ -4013,10 +4012,10 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
HCLGE_CFG_MTA_ITEM_IDX_S, idx); HCLGE_CFG_MTA_ITEM_IDX_S, idx);
req->item_idx = cpu_to_le16(item_idx); req->item_idx = cpu_to_le16(item_idx);
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
...@@ -4263,17 +4262,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, ...@@ -4263,17 +4262,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
} }
memset(&req, 0, sizeof(req)); memset(&req, 0, sizeof(req));
hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
HCLGE_MAC_EPORT_PFID_S, 0);
req.egress_port = cpu_to_le16(egress_port); req.egress_port = cpu_to_le16(egress_port);
...@@ -4324,8 +4316,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, ...@@ -4324,8 +4316,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
} }
memset(&req, 0, sizeof(req)); memset(&req, 0, sizeof(req));
hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr); hclge_prepare_mac_addr(&req, addr);
ret = hclge_remove_mac_vlan_tbl(vport, &req); ret = hclge_remove_mac_vlan_tbl(vport, &req);
...@@ -4357,10 +4349,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, ...@@ -4357,10 +4349,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
return -EINVAL; return -EINVAL;
} }
memset(&req, 0, sizeof(req)); memset(&req, 0, sizeof(req));
hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr); hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) { if (!status) {
...@@ -4424,10 +4416,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, ...@@ -4424,10 +4416,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
} }
memset(&req, 0, sizeof(req)); memset(&req, 0, sizeof(req));
hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr); hclge_prepare_mac_addr(&req, addr);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (!status) { if (!status) {
...@@ -4808,19 +4800,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) ...@@ -4808,19 +4800,19 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
vcfg->accept_tag1 ? 1 : 0); vcfg->accept_tag1 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
vcfg->accept_untag1 ? 1 : 0); vcfg->accept_untag1 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
vcfg->accept_tag2 ? 1 : 0); vcfg->accept_tag2 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
vcfg->accept_untag2 ? 1 : 0); vcfg->accept_untag2 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
vcfg->insert_tag1_en ? 1 : 0); vcfg->insert_tag1_en ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
vcfg->insert_tag2_en ? 1 : 0); vcfg->insert_tag2_en ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] = req->vf_bitmap[req->vf_offset] =
...@@ -4846,14 +4838,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) ...@@ -4846,14 +4838,14 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
vcfg->strip_tag1_en ? 1 : 0); vcfg->strip_tag1_en ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
vcfg->strip_tag2_en ? 1 : 0); vcfg->strip_tag2_en ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
vcfg->vlan1_vlan_prionly ? 1 : 0); vcfg->vlan1_vlan_prionly ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
vcfg->vlan2_vlan_prionly ? 1 : 0); vcfg->vlan2_vlan_prionly ? 1 : 0);
req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
req->vf_bitmap[req->vf_offset] = req->vf_bitmap[req->vf_offset] =
...@@ -5049,7 +5041,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, ...@@ -5049,7 +5041,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
req = (struct hclge_reset_tqp_queue_cmd *)desc.data; req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) { if (ret) {
...@@ -5079,7 +5071,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) ...@@ -5079,7 +5071,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret; return ret;
} }
return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
} }
static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
...@@ -5386,12 +5378,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, ...@@ -5386,12 +5378,12 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
retval = phy_read(phydev, HCLGE_PHY_CSC_REG); retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
HCLGE_PHY_MDIX_CTRL_S); HCLGE_PHY_MDIX_CTRL_S);
retval = phy_read(phydev, HCLGE_PHY_CSS_REG); retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
...@@ -6164,8 +6156,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) ...@@ -6164,8 +6156,8 @@ static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
req = (struct hclge_set_led_state_cmd *)desc.data; req = (struct hclge_set_led_state_cmd *)desc.data;
hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
HCLGE_LED_LOCATE_STATE_S, locate_led_status); HCLGE_LED_LOCATE_STATE_S, locate_led_status);
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) if (ret)
...@@ -6295,7 +6287,6 @@ static const struct hnae3_ae_ops hclge_ops = { ...@@ -6295,7 +6287,6 @@ static const struct hnae3_ae_ops hclge_ops = {
static struct hnae3_ae_algo ae_algo = { static struct hnae3_ae_algo ae_algo = {
.ops = &hclge_ops, .ops = &hclge_ops,
.name = HCLGE_NAME,
.pdev_id_table = ae_algo_pci_tbl, .pdev_id_table = ae_algo_pci_tbl,
}; };
......
...@@ -128,12 +128,12 @@ static int hclge_get_ring_chain_from_mbx( ...@@ -128,12 +128,12 @@ static int hclge_get_ring_chain_from_mbx(
HCLGE_MBX_RING_NODE_VARIABLE_NUM)) HCLGE_MBX_RING_NODE_VARIABLE_NUM))
return -ENOMEM; return -ENOMEM;
hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]); hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
ring_chain->tqp_index = ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]); hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
hnae_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, hnae3_set_field(ring_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S, HCLGE_INT_GL_IDX_S,
req->msg[5]); req->msg[5]);
cur_chain = ring_chain; cur_chain = ring_chain;
...@@ -142,19 +142,19 @@ static int hclge_get_ring_chain_from_mbx( ...@@ -142,19 +142,19 @@ static int hclge_get_ring_chain_from_mbx(
if (!new_chain) if (!new_chain)
goto err; goto err;
hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B, hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]); HCLGE_MBX_RING_MAP_BASIC_MSG_NUM]);
new_chain->tqp_index = new_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp hclge_get_queue_id(vport->nic.kinfo.tqp
[req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + [req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]); HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 1]]);
hnae_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M, hnae3_set_field(new_chain->int_gl_idx, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S, HCLGE_INT_GL_IDX_S,
req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i + req->msg[HCLGE_MBX_RING_NODE_VARIABLE_NUM * i +
HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]); HCLGE_MBX_RING_MAP_BASIC_MSG_NUM + 2]);
cur_chain->next = new_chain; cur_chain->next = new_chain;
cur_chain = new_chain; cur_chain = new_chain;
...@@ -460,7 +460,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -460,7 +460,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n", "dropped invalid mailbox message, code = %d\n",
req->msg[0]); req->msg[0]);
......
...@@ -67,16 +67,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, ...@@ -67,16 +67,16 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
HCLGE_MDIO_PHYID_S, phyid); HCLGE_MDIO_PHYID_S, phyid);
hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
HCLGE_MDIO_PHYREG_S, regnum); HCLGE_MDIO_PHYREG_S, regnum);
hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
HCLGE_MDIO_CTRL_ST_S, 1); HCLGE_MDIO_CTRL_ST_S, 1);
hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
mdio_cmd->data_wr = cpu_to_le16(data); mdio_cmd->data_wr = cpu_to_le16(data);
...@@ -105,16 +105,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) ...@@ -105,16 +105,16 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
HCLGE_MDIO_PHYID_S, phyid); HCLGE_MDIO_PHYID_S, phyid);
hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
HCLGE_MDIO_PHYREG_S, regnum); HCLGE_MDIO_PHYREG_S, regnum);
hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
HCLGE_MDIO_CTRL_ST_S, 1); HCLGE_MDIO_CTRL_ST_S, 1);
hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
/* Read out phy data */ /* Read out phy data */
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
...@@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) ...@@ -125,7 +125,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
return ret; return ret;
} }
if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
dev_err(&hdev->pdev->dev, "mdio read data error\n"); dev_err(&hdev->pdev->dev, "mdio read data error\n");
return -EIO; return -EIO;
} }
......
...@@ -1184,10 +1184,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) ...@@ -1184,10 +1184,10 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
u16 qs_id = vport->qs_offset + tc; u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp; u8 grp, sub_grp;
grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M, grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
HCLGE_BP_GRP_ID_S); HCLGE_BP_GRP_ID_S);
sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
HCLGE_BP_SUB_GRP_ID_S); HCLGE_BP_SUB_GRP_ID_S);
if (i == grp) if (i == grp)
qs_bitmap |= (1 << sub_grp); qs_bitmap |= (1 << sub_grp);
......
...@@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd { ...@@ -123,10 +123,11 @@ struct hclge_port_shapping_cmd {
}; };
#define hclge_tm_set_field(dest, string, val) \ #define hclge_tm_set_field(dest, string, val) \
hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ hnae3_set_field((dest), \
(HCLGE_TM_SHAP_##string##_LSH), val) (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \ #define hclge_tm_get_field(src, string) \
hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH)) (HCLGE_TM_SHAP_##string##_LSH))
int hclge_tm_schd_init(struct hclge_dev *hdev); int hclge_tm_schd_init(struct hclge_dev *hdev);
......
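The hclge_tm_set_field() change above is purely a reflow around the renamed helper; the ##string## token paste is unchanged. A short usage sketch, assuming an IR_B shaper field with HCLGE_TM_SHAP_IR_B_MSK and HCLGE_TM_SHAP_IR_B_LSH defined alongside the macro in hclge_tm.h:

/* Hedged usage sketch: "IR_B" is pasted into the mask/shift macro
 * names, so the calls expand against the assumed
 * HCLGE_TM_SHAP_IR_B_MSK / HCLGE_TM_SHAP_IR_B_LSH pair.
 */
u32 shapping_para = 0;
u8 ir_b = 6;

hclge_tm_set_field(shapping_para, IR_B, ir_b);	/* pack the rate field */
ir_b = hclge_tm_get_field(shapping_para, IR_B);	/* and read it back    */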
...@@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) ...@@ -76,32 +76,24 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{ {
int size = ring->desc_num * sizeof(struct hclgevf_desc); int size = ring->desc_num * sizeof(struct hclgevf_desc);
ring->desc = kzalloc(size, GFP_KERNEL); ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
size, &ring->desc_dma_addr,
GFP_KERNEL);
if (!ring->desc) if (!ring->desc)
return -ENOMEM; return -ENOMEM;
ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
ring->desc_dma_addr = 0;
kfree(ring->desc);
ring->desc = NULL;
return -ENOMEM;
}
return 0; return 0;
} }
static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{ {
dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, int size = ring->desc_num * sizeof(struct hclgevf_desc);
ring->desc_num * sizeof(ring->desc[0]),
hclgevf_ring_to_dma_dir(ring));
ring->desc_dma_addr = 0; if (ring->desc) {
kfree(ring->desc); dma_free_coherent(cmq_ring_to_dev(ring), size,
ring->desc = NULL; ring->desc, ring->desc_dma_addr);
ring->desc = NULL;
}
} }
static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
......
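In hclgevf_alloc_cmd_desc() above, the kzalloc()/dma_map_single() pair is replaced by a single coherent allocation, which removes the dma_mapping_error() unwind path and keeps the descriptor memory CPU/device-coherent for the lifetime of the ring. A sketch of the pattern under standard DMA-API semantics; the helper names and parameters here are illustrative, not the driver's:

#include <linux/dma-mapping.h>

/* Illustrative helpers, not the driver's: dma_zalloc_coherent() hands
 * back zeroed memory plus its bus address in one call, and freeing
 * takes the same (size, cpu address, bus address) triple.
 */
static int alloc_ring_desc(struct device *dev, size_t size,
			   void **desc, dma_addr_t *dma)
{
	*desc = dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
	return *desc ? 0 : -ENOMEM;	/* NULL simply maps to -ENOMEM */
}

static void free_ring_desc(struct device *dev, size_t size,
			   void *desc, dma_addr_t dma)
{
	if (desc)
		dma_free_coherent(dev, size, desc, dma);
}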
...@@ -450,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) ...@@ -450,12 +450,12 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false); hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) { for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B, hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
(tc_valid[i] & 0x1)); (tc_valid[i] & 0x1));
hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M, hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
HCLGEVF_RSS_TC_SIZE_S, tc_size[i]); HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M, hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]); HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
} }
status = hclgevf_cmd_send(&hdev->hw, &desc, 1); status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status) if (status)
...@@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, ...@@ -582,11 +582,11 @@ static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
} }
req->msg[idx_offset] = req->msg[idx_offset] =
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B); hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
req->msg[idx_offset + 1] = node->tqp_index; req->msg[idx_offset + 1] = node->tqp_index;
req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx, req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
HNAE3_RING_GL_IDX_M, HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S); HNAE3_RING_GL_IDX_S);
i++; i++;
if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM - if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
...@@ -1000,8 +1000,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) ...@@ -1000,8 +1000,8 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
/* wait to check the hardware reset completion status */ /* wait to check the hardware reset completion status */
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) && while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
(cnt < HCLGEVF_RESET_WAIT_CNT)) { (cnt < HCLGEVF_RESET_WAIT_CNT)) {
msleep(HCLGEVF_RESET_WAIT_MS); msleep(HCLGEVF_RESET_WAIT_MS);
val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
cnt++; cnt++;
...@@ -1959,7 +1959,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { ...@@ -1959,7 +1959,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
static struct hnae3_ae_algo ae_algovf = { static struct hnae3_ae_algo ae_algovf = {
.ops = &hclgevf_ops, .ops = &hclgevf_ops,
.name = HCLGEVF_NAME,
.pdev_id_table = ae_algovf_pci_tbl, .pdev_id_table = ae_algovf_pci_tbl,
}; };
......
...@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) ...@@ -152,7 +152,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n", "dropped invalid mailbox message, code = %d\n",
req->msg[0]); req->msg[0]);
......