Commit ac6e9185 authored by Jakub Kicinski

Merge branch 'net-hns3-updates-for-next'

Huazhong Tan says:

====================
net: hns3: updates for -next

This series includes some updates for the HNS3 ethernet driver.

 #1~#6: add some updates related to checksum offload.
 #7: add support for MAC pause mode with multiple TCs.
====================

Link: https://lore.kernel.org/r/1606535510-44346-1-git-send-email-tanhuazhong@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 9e39394f d78e5b6a
@@ -81,12 +81,13 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B,
HNAE3_DEV_SUPPORT_PTP_B,
HNAE3_DEV_SUPPORT_INT_QL_B,
HNAE3_DEV_SUPPORT_SIMPLE_BD_B,
HNAE3_DEV_SUPPORT_HW_TX_CSUM_B,
HNAE3_DEV_SUPPORT_TX_PUSH_B,
HNAE3_DEV_SUPPORT_PHY_IMP_B,
HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B,
HNAE3_DEV_SUPPORT_HW_PAD_B,
HNAE3_DEV_SUPPORT_STASH_B,
HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -113,8 +114,8 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_dev_int_ql_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps)
#define hnae3_dev_simple_bd_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_SIMPLE_BD_B, (hdev)->ae_dev->caps)
#define hnae3_dev_hw_csum_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, (hdev)->ae_dev->caps)
#define hnae3_dev_tx_push_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps)
@@ -178,6 +178,8 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
u32 tx_index, rx_index;
u32 q_num, value;
dma_addr_t addr;
u16 mss_hw_csum;
u32 l234info;
int cnt;
cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index);
@@ -206,26 +208,46 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
tx_desc = &ring->desc[tx_index];
addr = le64_to_cpu(tx_desc->addr);
mss_hw_csum = le16_to_cpu(tx_desc->tx.mss_hw_csum);
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
dev_info(dev, "(TX)addr: %pad\n", &addr);
dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
dev_info(dev, "(TX)send_size: %u\n",
le16_to_cpu(tx_desc->tx.send_size));
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
if (mss_hw_csum & BIT(HNS3_TXD_HW_CS_B)) {
u32 offset = le32_to_cpu(tx_desc->tx.ol_type_vlan_len_msec);
u32 start = le32_to_cpu(tx_desc->tx.type_cs_vlan_tso_len);
dev_info(dev, "(TX)csum start: %u\n",
hnae3_get_field(start,
HNS3_TXD_CSUM_START_M,
HNS3_TXD_CSUM_START_S));
dev_info(dev, "(TX)csum offset: %u\n",
hnae3_get_field(offset,
HNS3_TXD_CSUM_OFFSET_M,
HNS3_TXD_CSUM_OFFSET_S));
} else {
dev_info(dev, "(TX)vlan_tso: %u\n",
tx_desc->tx.type_cs_vlan_tso);
dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
dev_info(dev, "(TX)vlan_tag: %u\n",
le16_to_cpu(tx_desc->tx.outer_vlan_tag));
dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
dev_info(dev, "(TX)vlan_msec: %u\n",
tx_desc->tx.ol_type_vlan_msec);
dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
}
dev_info(dev, "(TX)vlan_tag: %u\n",
le16_to_cpu(tx_desc->tx.outer_vlan_tag));
dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
dev_info(dev, "(TX)paylen_ol4cs: %u\n",
le32_to_cpu(tx_desc->tx.paylen_ol4cs));
dev_info(dev, "(TX)vld_ra_ri: %u\n",
le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
dev_info(dev, "(TX)mss_hw_csum: %u\n", mss_hw_csum);
ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
@@ -233,10 +255,21 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
rx_desc = &ring->desc[rx_index];
addr = le64_to_cpu(rx_desc->addr);
l234info = le32_to_cpu(rx_desc->rx.l234_info);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: %pad\n", &addr);
dev_info(dev, "(RX)l234_info: %u\n",
le32_to_cpu(rx_desc->rx.l234_info));
dev_info(dev, "(RX)l234_info: %u\n", l234info);
if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
u32 lo, hi;
lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
HNS3_RXD_L2_CSUM_L_S);
hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
HNS3_RXD_L2_CSUM_H_S);
dev_info(dev, "(RX)csum: %u\n", lo | hi << 8);
}
dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
@@ -324,6 +357,11 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h)
test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support INT QL: %s\n",
test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support HW TX csum: %s\n",
test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n",
test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ?
"yes" : "no");
}
static void hns3_dbg_dev_specs(struct hnae3_handle *h)
@@ -695,7 +695,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
}
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
u16 *mss, u32 *type_cs_vlan_tso)
{
u32 l4_offset, hdr_len;
@@ -725,15 +725,6 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
SKB_GSO_GRE_CSUM |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
if ((!(skb_shinfo(skb)->gso_type &
SKB_GSO_PARTIAL)) &&
(skb_shinfo(skb)->gso_type &
SKB_GSO_UDP_TUNNEL_CSUM)) {
/* Software should clear the udp's checksum
* field when tso is needed.
*/
l4.udp->check = 0;
}
/* reset l3&l4 pointers from outer to inner headers */
l3.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
@@ -762,9 +753,13 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
}
/* find the txbd field values */
*paylen = skb->len - hdr_len;
*paylen_fdop_ol4cs = skb->len - hdr_len;
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
/* offload outer UDP header checksum */
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
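A side note on the renamed parameter: `paylen_fdop_ol4cs` carries the TSO payload length in its low bits, with the outer-UDP checksum request set as a flag bit (`HNS3_TXD_OL4CS_B`, bit 22). A standalone sketch of that packing, assuming the payload value never reaches bit 22 (GSO payloads are well below that):

```c
#include <stdio.h>
#include <stdint.h>

#define TXD_OL4CS_B 22	/* mirrors HNS3_TXD_OL4CS_B */

int main(void)
{
	/* Hypothetical TSO skb: 64KB total with 54 bytes of headers. */
	uint32_t skb_len = 65536, hdr_len = 54;
	uint32_t paylen_ol4cs = skb_len - hdr_len;	/* payload in low bits */

	/* Outer UDP checksum requested (SKB_GSO_UDP_TUNNEL_CSUM set). */
	paylen_ol4cs |= 1u << TXD_OL4CS_B;

	printf("payload: %u, ol4cs: %u\n",
	       (unsigned int)(paylen_ol4cs & ((1u << TXD_OL4CS_B) - 1)),
	       (unsigned int)(paylen_ol4cs >> TXD_OL4CS_B & 1u));
	return 0;
}
```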
@@ -833,8 +828,16 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
union l4_hdr_info l4;
/* for device version V3 and above, the hardware is able to do
 * this checksum offload itself.
 */
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
return false;
l4.hdr = skb_transport_header(skb);
if (!(!skb->encapsulation &&
@@ -1055,15 +1058,31 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
return 0;
}
/* check if the hardware is capable of checksum offloading */
static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
/* Kindly note that, due to backward compatibility of the TX
 * descriptor, HW checksum of non-IP packets and GSO packets is
 * handled at different places in the code below.
 */
if (skb->csum_not_inet || skb_is_gso(skb) ||
!test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
return false;
return true;
}
static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
struct sk_buff *skb, struct hns3_desc *desc)
{
u32 ol_type_vlan_len_msec = 0;
u32 paylen_ol4cs = skb->len;
u32 type_cs_vlan_tso = 0;
u32 paylen = skb->len;
u16 mss_hw_csum = 0;
u16 inner_vtag = 0;
u16 out_vtag = 0;
u16 mss = 0;
int ret;
ret = hns3_handle_vtags(ring, skb);
@@ -1088,6 +1107,17 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ol4_proto, il4_proto;
if (hns3_check_hw_tx_csum(skb)) {
/* set checksum start and offset, both in units of 2 bytes */
hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
skb_checksum_start_offset(skb) >> 1);
hns3_set_field(ol_type_vlan_len_msec,
HNS3_TXD_CSUM_OFFSET_S,
skb->csum_offset >> 1);
mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
goto out_hw_tx_csum;
}
skb_reset_mac_len(skb);
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
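The hardware checksum path above writes the checksum start and offset in units of 2 bytes, which is why both values are shifted right by one. A hypothetical, self-contained sketch of that encoding for a plain TCP-over-IPv4 skb (the 34/16 offsets are illustrative, not taken from the patch):

```c
#include <stdio.h>
#include <stdint.h>

/* Mirrors HNS3_TXD_CSUM_START_S / HNS3_TXD_CSUM_OFFSET_S / HNS3_TXD_HW_CS_B */
#define CSUM_START_S  8
#define CSUM_OFFSET_S 8
#define HW_CS_B       14

int main(void)
{
	/* Illustrative CHECKSUM_PARTIAL skb: TCP over IPv4, so checksumming
	 * starts 34 bytes in (14B Ethernet + 20B IP) and the result lands
	 * 16 bytes into the TCP header.
	 */
	unsigned int csum_start = 34, csum_offset = 16;
	uint32_t type_cs_vlan_tso = 0, ol_type_vlan_len_msec = 0;
	uint16_t mss_hw_csum = 0;

	/* Both fields are written in units of 2 bytes, hence the >> 1. */
	type_cs_vlan_tso |= (uint32_t)(csum_start >> 1) << CSUM_START_S;
	ol_type_vlan_len_msec |= (uint32_t)(csum_offset >> 1) << CSUM_OFFSET_S;
	mss_hw_csum |= 1u << HW_CS_B;	/* ask HW to compute the checksum */

	printf("start field: %u, offset field: %u\n",
	       (unsigned int)(type_cs_vlan_tso >> CSUM_START_S),
	       (unsigned int)(ol_type_vlan_len_msec >> CSUM_OFFSET_S));
	return 0;
}
```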
@@ -1108,7 +1138,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
return ret;
}
ret = hns3_set_tso(skb, &paylen, &mss,
ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
&type_cs_vlan_tso);
if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
@@ -1118,12 +1148,13 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
}
}
out_hw_tx_csum:
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
@@ -2326,39 +2357,32 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_FILTER |
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
NETIF_F_FRAGLIST;
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
netdev->vlan_features |=
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
netdev->vlan_features |= NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
NETIF_F_FRAGLIST;
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
NETIF_F_FRAGLIST;
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2376,6 +2400,25 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
}
if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) {
netdev->hw_features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->vlan_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
} else {
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
}
if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2798,6 +2841,22 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
return 0;
}
static void hns3_checksum_complete(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 l234info)
{
u32 lo, hi;
u64_stats_update_begin(&ring->syncp);
ring->stats.csum_complete++;
u64_stats_update_end(&ring->syncp);
skb->ip_summed = CHECKSUM_COMPLETE;
lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
HNS3_RXD_L2_CSUM_L_S);
hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
HNS3_RXD_L2_CSUM_H_S);
skb->csum = csum_unfold((__force __sum16)(lo | hi << 8));
}
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
u32 l234info, u32 bd_base_info, u32 ol_info)
{
@@ -2812,6 +2871,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
hns3_checksum_complete(ring, skb, l234info);
return;
}
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
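For context on the RX side: when `HNS3_RXD_L2_CSUM_B` is set, the hardware reports a one's-complement sum over the frame and the skb is marked `CHECKSUM_COMPLETE`, letting the stack verify any L4 checksum itself instead of relying on per-protocol validation. A toy implementation of such a sum (general networking arithmetic, not driver code):

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* One's-complement 16-bit sum over a buffer -- the quantity a
 * CHECKSUM_COMPLETE device reports for each received frame.
 */
static uint16_t csum16(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)data[i] << 8 | data[i + 1];
	if (len & 1)			/* pad an odd trailing byte */
		sum += (uint32_t)data[len - 1] << 8;
	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c };

	printf("csum = 0x%04x\n", csum16(pkt, sizeof(pkt)));	/* 0x451c */
	return 0;
}
```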
@@ -4157,6 +4221,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
netdev->max_mtu = HNS3_MAX_MTU;
if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
if (netif_msg_drv(handle))
@@ -18,6 +18,7 @@ enum hns3_nic_state {
HNS3_NIC_STATE_SERVICE_INITED,
HNS3_NIC_STATE_SERVICE_SCHED,
HNS3_NIC_STATE2_RESET_REQUESTED,
HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
HNS3_NIC_STATE_MAX
};
@@ -82,6 +83,12 @@ enum hns3_nic_state {
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
#define HNS3_RXD_L2_CSUM_B 15
#define HNS3_RXD_L2_CSUM_L_S 4
#define HNS3_RXD_L2_CSUM_L_M (0xff << HNS3_RXD_L2_CSUM_L_S)
#define HNS3_RXD_L2_CSUM_H_S 24
#define HNS3_RXD_L2_CSUM_H_M (0xff << HNS3_RXD_L2_CSUM_H_S)
#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
@@ -139,6 +146,9 @@ enum hns3_nic_state {
#define HNS3_TXD_L4LEN_S 24
#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S)
#define HNS3_TXD_CSUM_START_S 8
#define HNS3_TXD_CSUM_START_M (0xffff << HNS3_TXD_CSUM_START_S)
#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
@@ -146,6 +156,9 @@ enum hns3_nic_state {
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)
#define HNS3_TXD_CSUM_OFFSET_S 8
#define HNS3_TXD_CSUM_OFFSET_M (0xffff << HNS3_TXD_CSUM_OFFSET_S)
#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
@@ -159,8 +172,11 @@ enum hns3_nic_state {
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)
#define HNS3_TXD_OL4CS_B 22
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
#define HNS3_TXD_HW_CS_B 14
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
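These defines show why `mss` became `mss_hw_csum`: the TSO MSS occupies bits 0-13 of the 16-bit word and the HW checksum flag takes bit 14. A small sketch of the layout (in the driver the two uses are mutually exclusive, since `hns3_check_hw_tx_csum()` rejects GSO skbs; packing both here is purely illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* Mirrors HNS3_TXD_MSS_{S,M} and HNS3_TXD_HW_CS_B */
#define TXD_MSS_S   0
#define TXD_MSS_M   (0x3fffu << TXD_MSS_S)
#define TXD_HW_CS_B 14

int main(void)
{
	uint16_t mss_hw_csum = 0;

	mss_hw_csum |= (1448u << TXD_MSS_S) & TXD_MSS_M; /* typical TSO MSS */
	mss_hw_csum |= 1u << TXD_HW_CS_B;                /* HW checksum flag */

	assert(((mss_hw_csum & TXD_MSS_M) >> TXD_MSS_S) == 1448);
	assert(mss_hw_csum & (1u << TXD_HW_CS_B));
	return 0;
}
```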
@@ -250,9 +266,9 @@ struct __packed hns3_desc {
};
};
__le32 paylen;
__le32 paylen_ol4cs;
__le16 bdtp_fe_sc_vld_ra_ri;
__le16 mss;
__le16 mss_hw_csum;
} tx;
struct {
@@ -371,6 +387,7 @@ struct ring_stats {
u64 err_bd_num;
u64 l2_err;
u64 l3l4_csum_err;
u64 csum_complete;
u64 rx_multicast;
u64 non_reuse_pg;
};
@@ -55,6 +55,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("err_bd_num", err_bd_num),
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
HNS3_TQP_STAT("csum_complete", csum_complete),
HNS3_TQP_STAT("multicast", rx_multicast),
HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
};
@@ -355,6 +355,10 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B))
set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}
static enum hclge_cmd_status
@@ -376,12 +376,13 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_FD_FORWARD_TC_B,
HCLGE_CAP_PTP_B,
HCLGE_CAP_INT_QL_B,
HCLGE_CAP_SIMPLE_BD_B,
HCLGE_CAP_HW_TX_CSUM_B,
HCLGE_CAP_TX_PUSH_B,
HCLGE_CAP_PHY_IMP_B,
HCLGE_CAP_TQP_TXRX_INDEP_B,
HCLGE_CAP_HW_PAD_B,
HCLGE_CAP_STASH_B,
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
};
#define HCLGE_QUERY_CAP_LENGTH 3
@@ -715,7 +715,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
}
}
static void hclge_pfc_info_init(struct hclge_dev *hdev)
static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
@@ -733,6 +733,27 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev)
}
}
static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
if (!hdev->tm_info.pfc_en) {
hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
return;
}
if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
hdev->tm_info.fc_mode = HCLGE_FC_PFC;
}
}
static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
hclge_update_fc_mode(hdev);
else
hclge_update_fc_mode_by_dcb_flag(hdev);
}
static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
hclge_tm_pg_info_init(hdev);
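The `hclge_pfc_info_init()` split above keeps the old DCB-flag behaviour for pre-V3 devices, while V3+ hardware, which supports MAC pause per TC, switches modes directly from `pfc_en`. A compact simulation of the V3 transition logic, with hypothetical type names:

```c
#include <stdio.h>

enum fc_mode { FC_NONE, FC_FULL, FC_PFC };

struct tm_state {
	unsigned int pfc_en;		/* per-TC PFC enable bitmap */
	enum fc_mode fc_mode;
	enum fc_mode fc_mode_last_time;
};

/* Same transitions as the new hclge_update_fc_mode(): enabling PFC on
 * any TC saves the current mode and switches to PFC; disabling PFC
 * restores the saved mode.
 */
static void update_fc_mode(struct tm_state *s)
{
	if (!s->pfc_en) {
		s->fc_mode = s->fc_mode_last_time;
		return;
	}
	if (s->fc_mode != FC_PFC) {
		s->fc_mode_last_time = s->fc_mode;
		s->fc_mode = FC_PFC;
	}
}

int main(void)
{
	struct tm_state s = { 0, FC_FULL, FC_FULL };

	s.pfc_en = 0x3;			/* user enables PFC on TC0/TC1 */
	update_fc_mode(&s);
	printf("mode after enable: %s\n", s.fc_mode == FC_PFC ? "PFC" : "?");

	s.pfc_en = 0;			/* user disables PFC */
	update_fc_mode(&s);
	printf("mode after disable: %s\n", s.fc_mode == FC_FULL ? "FULL" : "?");
	return 0;
}
```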
@@ -336,6 +336,10 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B))
set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B))
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
@@ -152,12 +152,13 @@ enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_FD_FORWARD_TC_B,
HCLGEVF_CAP_PTP_B,
HCLGEVF_CAP_INT_QL_B,
HCLGEVF_CAP_SIMPLE_BD_B,
HCLGEVF_CAP_HW_TX_CSUM_B,
HCLGEVF_CAP_TX_PUSH_B,
HCLGEVF_CAP_PHY_IMP_B,
HCLGEVF_CAP_TQP_TXRX_INDEP_B,
HCLGEVF_CAP_HW_PAD_B,
HCLGEVF_CAP_STASH_B,
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
};
#define HCLGEVF_QUERY_CAP_LENGTH 3