Commit a8038bef authored by David S. Miller

Merge branch 'hns3-next'

Peng Li says:

====================
add some features and fix some bugs for HNS3 driver

This patchset adds support for some new features and fixes some bugs:
[Patch 1/17 - 5/17] add support for modifying/querying the tqp number
through the ethtool -L/-l commands, and fix some related bugs in
changing the tqp number.
[Patch 6/17 - 9/17] add support for vlan tag offload in the tx and rx
directions for the pf, and fix some related bugs.
[Patch 10/17 - 11/17] fix bugs in auto-negotiation.
[Patch 12/17] add support for the ethtool set_pauseparam command.
[Patch 13/17 - 14/17] add support for updating flow control settings
after auto-negotiation.
[Patch 15/17 - 17/17] fix some other bugs in net-next.

---
Change Log:
V4 -> V5:
1. change the name spelling of Peng Li.

V3 -> V4:
1. change the name spelling of Mingguang Qu and Jian Shen.

V2 -> V3:
1. order local variables as requested by David Miller.
2. use "int" for index iteration loops, as requested by David Miller.

V1 -> V2:
1. address the comments from Sergei Shtylyov.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 214bb1c7 71b83869
...@@ -278,6 +278,8 @@ struct hnae3_ae_dev {
* Set vlan filter config of Ports
* set_vf_vlan_filter()
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
...@@ -384,8 +386,16 @@ struct hnae3_ae_ops {
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct hnae3_handle *handle,
enum hnae3_reset_type reset);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
u16 *free_tqps, u16 *max_rss_size);
int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
void (*get_flowctrl_adv)(struct hnae3_handle *handle,
u32 *flowctrl_adv);
};
struct hnae3_dcb_ops {
......
...@@ -723,6 +723,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc_vtags(struct sk_buff *skb,
struct hns3_enet_ring *tx_ring,
u32 *inner_vlan_flag,
u32 *out_vlan_flag,
u16 *inner_vtag,
u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13
if (skb->protocol == htons(ETH_P_8021Q) &&
!(tx_ring->tqp->handle->kinfo.netdev->features &
NETIF_F_HW_VLAN_CTAG_TX)) {
/* When HW VLAN acceleration is turned off, and the stack
* sets the protocol to 802.1q, the driver just need to
* set the protocol to the encapsulated ethertype.
*/
skb->protocol = vlan_get_protocol(skb);
return 0;
}
if (skb_vlan_tag_present(skb)) {
u16 vlan_tag;
vlan_tag = skb_vlan_tag_get(skb);
vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
/* Based on hw strategy, use out_vtag in two layer tag case,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
*out_vtag = vlan_tag;
} else {
hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
}
} else if (skb->protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *vhdr;
int rc;
rc = skb_cow_head(skb, 0);
if (rc < 0)
return rc;
vhdr = (struct vlan_ethhdr *)skb->data;
vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
<< HNS3_TX_VLAN_PRIO_SHIFT);
}
skb->protocol = vlan_get_protocol(skb);
return 0;
}
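Note (illustrative, not part of the patch): HNS3_TX_VLAN_PRIO_SHIFT is 13 because the 802.1Q TCI word carries the priority code point in bits 15:13, the DEI bit in bit 12 and the VLAN ID in bits 11:0, so the code above ORs skb->priority into the PCP field of the tag. A minimal sketch of composing such a tag, where prio, dei and vid are assumed inputs:
    /* sketch only -- prio, dei and vid are illustrative variables */
    u16 tci = ((prio & 0x7) << 13) | ((dei & 0x1) << 12) | (vid & VLAN_VID_MASK);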
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
enum hns_desc_type type)
...@@ -733,6 +785,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
u16 bdtp_fe_sc_vld_ra_ri = 0;
u32 type_cs_vlan_tso = 0;
struct sk_buff *skb;
u16 inner_vtag = 0;
u16 out_vtag = 0;
u32 paylen = 0;
u16 mss = 0;
__be16 protocol;
...@@ -756,15 +810,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
skb = (struct sk_buff *)priv;
paylen = skb->len;
ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
&ol_type_vlan_len_msec,
&inner_vtag, &out_vtag);
if (unlikely(ret))
return ret;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
protocol = skb->protocol;
/* vlan packet*/
if (protocol == htons(ETH_P_8021Q)) {
protocol = vlan_get_protocol(skb);
skb->protocol = protocol;
}
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (ret)
return ret;
...@@ -790,6 +845,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
cpu_to_le32(type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
}
/* move ring pointer to next.*/
...@@ -1032,6 +1089,9 @@ static int hns3_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
netdev_features_t changed;
int ret;
if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
priv->ops.fill_desc = hns3_fill_desc_tso;
...@@ -1041,6 +1101,17 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
else
ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
if (ret)
return ret;
}
netdev->features = features;
return 0;
}
...@@ -1492,6 +1563,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
...@@ -1506,6 +1578,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
...@@ -2085,6 +2158,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetchw(skb->data);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
*/
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
if (!(vlan_tag & VLAN_VID_MASK))
vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
if (vlan_tag & VLAN_VID_MASK)
__vlan_hwaccel_put_tag(skb,
htons(ETH_P_8021Q),
vlan_tag);
}
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
...@@ -2651,6 +2740,19 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
return ret;
}
static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
devm_kfree(priv->dev, priv->ring_data[i].ring);
devm_kfree(priv->dev,
priv->ring_data[i + h->kinfo.num_tqps].ring);
}
devm_kfree(priv->dev, priv->ring_data);
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
int ret;
...@@ -2787,8 +2889,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
h->ae_algo->ops->reset_queue(h, i);
hns3_fini_ring(priv->ring_data[i].ring);
devm_kfree(priv->dev, priv->ring_data[i].ring);
hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
devm_kfree(priv->dev,
priv->ring_data[i + h->kinfo.num_tqps].ring);
}
devm_kfree(priv->dev, priv->ring_data);
return 0;
}
...@@ -3162,6 +3268,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
return ret;
}
static u16 hns3_get_max_available_channels(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
u16 free_tqps, max_rss_size, max_tqps;
h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
max_tqps = h->kinfo.num_tc * max_rss_size;
return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
}
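Note (illustrative, not part of the patch): the value returned above is the smaller of what RSS can spread across the configured TCs and what the hardware still has available. With assumed example values num_tc = 2, max_rss_size = 16, num_tqps = 8 and free_tqps = 8, the limit is min(2 * 16, 8 + 8) = 16 combined channels.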
static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret;
ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
if (ret)
return ret;
ret = hns3_get_ring_config(priv);
if (ret)
return ret;
ret = hns3_nic_init_vector_data(priv);
if (ret)
goto err_uninit_vector;
ret = hns3_init_all_ring(priv);
if (ret)
goto err_put_ring;
return 0;
err_put_ring:
hns3_put_ring_config(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
return ret;
}
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
return (new_tqp_num / num_tc) * num_tc;
}
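Note (illustrative, not part of the patch): hns3_adjust_tqps_num rounds the requested queue count down to a multiple of num_tc so every TC gets an equal share; for example, with num_tc = 4 a request of 10 becomes (10 / 4) * 4 = 8.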
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
bool if_running = netif_running(netdev);
u32 new_tqp_num = ch->combined_count;
u16 org_tqp_num;
int ret;
if (ch->rx_count || ch->tx_count)
return -EINVAL;
if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
new_tqp_num < kinfo->num_tc) {
dev_err(&netdev->dev,
"Change tqps fail, the tqp range is from %d to %d",
kinfo->num_tc,
hns3_get_max_available_channels(netdev));
return -EINVAL;
}
new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
if (kinfo->num_tqps == new_tqp_num)
return 0;
if (if_running)
dev_close(netdev);
hns3_clear_all_ring(h);
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
dev_err(&netdev->dev,
"Unbind vector with tqp fail, nothing is changed");
goto open_netdev;
}
hns3_uninit_all_ring(priv);
org_tqp_num = h->kinfo.num_tqps;
ret = hns3_modify_tqp_num(netdev, new_tqp_num);
if (ret) {
ret = hns3_modify_tqp_num(netdev, org_tqp_num);
if (ret) {
/* If revert to old tqp failed, fatal error occurred */
dev_err(&netdev->dev,
"Revert to old tqp num fail, ret=%d", ret);
return ret;
}
dev_info(&netdev->dev,
"Change tqp num fail, Revert to old tqp num");
}
open_netdev:
if (if_running)
dev_open(netdev);
return ret;
}
static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
......
...@@ -595,6 +595,8 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch);
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
......
...@@ -559,10 +559,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
&param->rx_pause, &param->tx_pause);
}
static int hns3_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->set_pauseparam)
return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
param->rx_pause,
param->tx_pause);
return -EOPNOTSUPP;
}
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
u32 flowctrl_adv = 0;
u32 supported_caps;
u32 advertised_caps;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
...@@ -638,6 +651,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
if (!cmd->base.autoneg)
advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
advertised_caps &= ~HNS3_LM_PAUSE_BIT;
/* now, map driver link modes to ethtool link modes */
hns3_driv_to_eth_caps(supported_caps, cmd, false);
hns3_driv_to_eth_caps(advertised_caps, cmd, true);
...@@ -650,6 +665,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
/* 4.mdio_support */
cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
/* 5.get flow control settings */
if (h->ae_algo->ops->get_flowctrl_adv)
h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
if (flowctrl_adv & ADVERTISED_Pause)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Pause);
if (flowctrl_adv & ADVERTISED_Asym_Pause)
ethtool_link_ksettings_add_link_mode(cmd, advertising,
Asym_Pause);
return 0;
}
...@@ -730,7 +757,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
cmd->data = h->kinfo.rss_size;
break;
case ETHTOOL_GRXFH:
return h->ae_algo->ops->get_rss_tuple(h, cmd);
...@@ -849,6 +876,15 @@ static int hns3_nway_reset(struct net_device *netdev)
return genphy_restart_aneg(phy);
}
void hns3_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->get_channels)
h->ae_algo->ops->get_channels(h, ch);
}
static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
...@@ -871,6 +907,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
.get_pauseparam = hns3_get_pauseparam,
.set_pauseparam = hns3_set_pauseparam,
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
...@@ -883,6 +920,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
......
...@@ -180,6 +180,10 @@ enum hclge_opcode_type {
/* Promisuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
/* Vlan offload command */
HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
/* Interrupts cmd */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
...@@ -191,6 +195,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012,
/* Multicast linear table cmd */
HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020,
...@@ -399,6 +404,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
#define HCLGE_CFG_DEFAULT_SPEED_S 16
#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
#define HCLGE_CFG_RSS_SIZE_S 24
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
struct hclge_cfg_param_cmd {
__le32 offset;
...@@ -587,6 +594,15 @@ struct hclge_mac_vlan_tbl_entry_cmd {
u8 rsv2[6];
};
#define HCLGE_VLAN_MASK_EN_B 0x0
struct hclge_mac_vlan_mask_entry_cmd {
u8 rsv0[2];
u8 vlan_mask;
u8 rsv1;
u8 mac_mask[6];
u8 rsv2[14];
};
#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
#define HCLGE_CFG_MTA_MAC_EN_B 0x7
...@@ -658,6 +674,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
#define HCLGE_ACCEPT_TAG_B 0
#define HCLGE_ACCEPT_UNTAG_B 1
#define HCLGE_PORT_INS_TAG1_EN_B 2
#define HCLGE_PORT_INS_TAG2_EN_B 3
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
u8 rsv1[2];
__le16 def_vlan_tag1;
__le16 def_vlan_tag2;
u8 vf_bitmap[8];
u8 rsv2[8];
};
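Note (a sketch under assumptions, not part of the patch): the hclge_main.c code that fills this descriptor is not included in this excerpt. Assuming a command buffer already mapped as req, the accept/insert bits above would be packed into vport_vlan_cfg with the driver's bit helpers along these lines:
    /* sketch only -- 'req' and the chosen values are assumptions */
    hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, 1);
    hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, 1);
    hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 0);
    hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 0);
    hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);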
#define HCLGE_REM_TAG1_EN_B 0
#define HCLGE_REM_TAG2_EN_B 1
#define HCLGE_SHOW_TAG1_EN_B 2
#define HCLGE_SHOW_TAG2_EN_B 3
struct hclge_vport_vtag_rx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
u8 rsv1[6];
u8 vf_bitmap[8];
u8 rsv2[8];
};
struct hclge_tx_vlan_type_cfg_cmd {
__le16 ot_vlan_type;
__le16 in_vlan_type;
u8 rsv[20];
};
struct hclge_rx_vlan_type_cfg_cmd {
__le16 ot_fst_vlan_type;
__le16 ot_sec_vlan_type;
__le16 in_fst_vlan_type;
__le16 in_sec_vlan_type;
u8 rsv[16];
};
struct hclge_cfg_com_tqp_queue_cmd {
__le16 tqp_id;
__le16 stream_id;
......
...@@ -79,6 +79,10 @@
#define HCLGE_PHY_MDIX_STATUS_B (6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_GLOBAL_RESET_REG 0x20A00
...@@ -220,6 +224,7 @@ struct hclge_cfg {
u8 tc_num;
u16 tqp_desc_num;
u16 rx_buf_len;
u16 rss_size_max;
u8 phy_addr;
u8 media_type;
u8 mac_addr[ETH_ALEN];
...@@ -423,6 +428,15 @@ struct hclge_hw_stats {
struct hclge_32_bit_stats all_32_bit_stats;
};
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
u16 rx_ot_sec_vlan_type;
u16 rx_in_fst_vlan_type;
u16 rx_in_sec_vlan_type;
u16 tx_ot_vlan_type;
u16 tx_in_vlan_type;
};
struct hclge_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
...@@ -509,6 +523,26 @@ struct hclge_dev {
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
bool enable_mta; /* Mutilcast filter enable */
bool accept_mta_mc; /* Whether accept mta filter multicast */
struct hclge_vlan_type_cfg vlan_type_cfg;
};
/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
bool accept_tag; /* Whether accept tagged packet from host */
bool accept_untag; /* Whether accept untagged packet from host */
bool insert_tag1_en; /* Whether insert inner vlan tag */
bool insert_tag2_en; /* Whether insert outer vlan tag */
u16 default_tag1; /* The default inner vlan tag to insert */
u16 default_tag2; /* The default outer vlan tag to insert */
};
/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
bool strip_tag1_en; /* Whether strip inner vlan tag */
bool strip_tag2_en; /* Whether strip outer vlan tag */
bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
};
struct hclge_vport {
...@@ -523,6 +557,9 @@ struct hclge_vport {
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
int vport_id;
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
...@@ -565,4 +602,5 @@ int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
#endif
...@@ -17,6 +17,7 @@
#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
SUPPORTED_TP | \
SUPPORTED_Pause | \
SUPPORTED_Asym_Pause | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
PHY_1000BT_FEATURES)
...@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
ret = hclge_cfg_flowctrl(hdev);
if (ret)
netdev_err(netdev, "failed to configure flow control.\n");
}
int hclge_mac_start_phy(struct hclge_dev *hdev)
......
...@@ -23,8 +23,8 @@ enum hclge_shaper_level {
HCLGE_SHAPER_LVL_PF = 1,
};
#define HCLGE_SHAPER_BS_U_DEF 1
#define HCLGE_SHAPER_BS_U_DEF 5
#define HCLGE_SHAPER_BS_S_DEF 4
#define HCLGE_SHAPER_BS_S_DEF 20
#define HCLGE_ETHER_MAX_RATE 100000
...@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
return 0;
}
static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
struct hclge_desc desc;
......
...@@ -118,4 +118,5 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
#endif