Commit f0332962 authored by David S. Miller

Merge branch 'hns3-next'

Salil Mehta says:

====================
Misc. bug fixes & some minor additions to HNS3 driver

This patch set provides some bug fixes found during testing
and review. It also provides some minor additions needed to run
the existing code on the new revision of the HNS3 hardware.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e9be0e99 544a7bcd
@@ -36,6 +36,49 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
return false;
}
static void hnae3_set_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, int inited)
{
switch (client->type) {
case HNAE3_CLIENT_KNIC:
hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_UNIC:
hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
break;
case HNAE3_CLIENT_ROCE:
hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
break;
default:
break;
}
}
static int hnae3_get_client_init_flag(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
int inited = 0;
switch (client->type) {
case HNAE3_CLIENT_KNIC:
inited = hnae_get_bit(ae_dev->flag,
HNAE3_KNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_UNIC:
inited = hnae_get_bit(ae_dev->flag,
HNAE3_UNIC_CLIENT_INITED_B);
break;
case HNAE3_CLIENT_ROCE:
inited = hnae_get_bit(ae_dev->flag,
HNAE3_ROCE_CLIENT_INITED_B);
break;
default:
break;
}
return inited;
}
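These two helpers only flip and read one bit of ae_dev->flag per client type. As a point of reference, here is a minimal stand-alone sketch of that bit handling; it assumes a plain unsigned long flag word and is only an illustrative equivalent of how the hnae_set_bit()/hnae_get_bit() macros from hnae3.h are used above, not the driver's own code.

/* Illustrative only, not part of the patch: single-bit helpers with the
 * semantics the calls above rely on.
 */
static inline void example_set_bit(unsigned long *flag, int shift, int val)
{
        if (val)
                *flag |= 1UL << shift;
        else
                *flag &= ~(1UL << shift);
}

static inline int example_get_bit(unsigned long flag, int shift)
{
        return (flag >> shift) & 0x1;
}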
static int hnae3_match_n_instantiate(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev, bool is_reg)
{
@@ -56,14 +99,14 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
return ret;
}
-hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 1);
+hnae3_set_client_init_flag(client, ae_dev, 1);
return 0;
}
-if (hnae_get_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B)) {
+if (hnae3_get_client_init_flag(client, ae_dev)) {
ae_dev->ops->uninit_client_instance(client, ae_dev);
-hnae_set_bit(ae_dev->flag, HNAE3_CLIENT_INITED_B, 0);
+hnae3_set_client_init_flag(client, ae_dev, 0);
}
return 0;
...
@@ -54,7 +54,9 @@
#define HNAE3_DEV_INITED_B 0x0
#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
#define HNAE3_DEV_SUPPORT_DCB_B 0x2
-#define HNAE3_CLIENT_INITED_B 0x3
+#define HNAE3_KNIC_CLIENT_INITED_B 0x3
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
...
@@ -25,6 +25,9 @@
#include "hnae3.h"
#include "hns3_enet.h"
static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
@@ -273,6 +276,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
if (ret)
return ret;
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
@@ -333,17 +340,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
ops->stop(priv->ae_handle);
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
/* free irq resources */
hns3_nic_uninit_irq(priv);
hns3_clear_all_ring(priv->ae_handle);
}
static int hns3_nic_net_stop(struct net_device *netdev)
@@ -1819,6 +1828,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
@@ -1826,6 +1836,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
ring->desc_cb[i].reuse_flag = 0;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+ ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
}
static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
@@ -2066,6 +2077,39 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
struct hns3_desc *desc, u32 l234info)
{
struct pci_dev *pdev = ring->tqp->handle->pdev;
u16 vlan_tag;
if (pdev->revision == 0x20) {
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
if (!(vlan_tag & VLAN_VID_MASK))
vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
return vlan_tag;
}
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
break;
case HNS3_STRP_INNER_VLAN:
vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
break;
default:
vlan_tag = 0;
break;
}
return vlan_tag;
}
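For readers unfamiliar with the hnae field accessors: the hnae_get_field() call above is a mask-and-shift over the two-bit strip-tag position field in l234info. A minimal illustrative sketch follows; the helper name is made up for this example and is not part of the driver.

/* Illustrative only: example_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
 * HNS3_RXD_STRP_TAGP_S) yields 0, HNS3_STRP_OUTER_VLAN (1) or
 * HNS3_STRP_INNER_VLAN (2), as consumed by the switch above.
 */
static inline u32 example_get_field(u32 value, u32 mask, int shift)
{
        return (value & mask) >> shift;
}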
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb, int *out_bnum)
{
@@ -2085,9 +2129,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetch(desc);
-length = le16_to_cpu(desc->rx.pkt_len);
+length = le16_to_cpu(desc->rx.size);
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
l234info = le32_to_cpu(desc->rx.l234_info);
/* Check valid BD */
if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
@@ -2121,22 +2164,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
prefetchw(skb->data);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
*/
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
if (!(vlan_tag & VLAN_VID_MASK))
vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
if (vlan_tag & VLAN_VID_MASK)
__vlan_hwaccel_put_tag(skb,
htons(ETH_P_8021Q),
vlan_tag);
}
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2173,6 +2200,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
*out_bnum = bnum;
l234info = le32_to_cpu(desc->rx.l234_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
*/
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
u16 vlan_tag;
vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
if (vlan_tag & VLAN_VID_MASK)
__vlan_hwaccel_put_tag(skb,
htons(ETH_P_8021Q),
vlan_tag);
}
if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
((u64 *)desc)[0], ((u64 *)desc)[1]);
@@ -2905,8 +2948,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
goto out_when_alloc_ring_memory;
}
hns3_init_ring_hw(priv->ring_data[i].ring);
u64_stats_init(&priv->ring_data[i].ring->syncp);
}
@@ -3068,6 +3109,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
if (ret)
netdev_err(netdev, "uninit vector error\n");
@@ -3183,20 +3226,48 @@ static void hns3_recover_hw_addr(struct net_device *ndev)
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
if (!HNAE3_IS_TX_RING(ring))
return;
while (ring->next_to_clean != ring->next_to_use) {
ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
hns3_free_buffer_detach(ring, ring->next_to_clean);
ring_ptr_move_fw(ring, next_to_clean);
}
}
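ring_ptr_move_fw() above advances the software ring index with wrap-around. Below is a stand-alone sketch of that behaviour, assuming a desc_num-entry ring; it is illustrative only, the driver's real macro lives in hns3_enet.h.

/* Illustrative only: wrap-around index advance, mirroring what
 * ring_ptr_move_fw(ring, next_to_clean) does on a desc_num-entry ring.
 */
static inline int example_ring_next(int idx, int desc_num)
{
        return (idx + 1) % desc_num;
}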
-static void hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
-if (HNAE3_IS_TX_RING(ring))
-return;
+struct hns3_desc_cb res_cbs;
+int ret;
while (ring->next_to_use != ring->next_to_clean) {
/* When a buffer is not reused, its memory has been
* freed in hns3_handle_rx_bd or will be freed by the
* stack, so we need to replace the buffer here.
*/
if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
ret = hns3_reserve_buffer_map(ring, &res_cbs);
if (ret) {
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
/* If allocating a new buffer fails, exit directly
* and re-clear in the up flow.
*/
netdev_warn(ring->tqp->handle->kinfo.netdev,
"reserve buffer map failed, ret = %d\n",
ret);
return ret;
}
hns3_replace_buffer(ring, ring->next_to_use,
&res_cbs);
}
ring_ptr_move_fw(ring, next_to_use);
}
return 0;
}
static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_use != ring->next_to_clean) {
/* When a buffer is not reused, its memory has been
* freed in hns3_handle_rx_bd or will be freed by
@@ -3212,6 +3283,19 @@ static void hns3_clear_rx_ring(struct hns3_enet_ring *ring)
}
}
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
{
struct net_device *ndev = h->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hns3_enet_ring *ring;
u32 i;
for (i = 0; i < h->kinfo.num_tqps; i++) {
ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
hns3_force_clear_rx_ring(ring);
}
}
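The loop above relies on the ring_data layout used throughout the driver: TX ring i sits at index i, and its RX counterpart at index i + num_tqps. The hypothetical accessors below spell that convention out; the names are examples, not part of the patch.

/* Illustrative only: make the TX/RX ring_data indexing explicit. */
static struct hns3_enet_ring *example_tx_ring(struct hns3_nic_priv *priv, int i)
{
        return priv->ring_data[i].ring;
}

static struct hns3_enet_ring *example_rx_ring(struct hns3_nic_priv *priv,
                                              struct hnae3_handle *h, int i)
{
        return priv->ring_data[i + h->kinfo.num_tqps].ring;
}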
static void hns3_clear_all_ring(struct hnae3_handle *h)
{
struct net_device *ndev = h->kinfo.netdev;
@@ -3229,10 +3313,51 @@ static void hns3_clear_all_ring(struct hnae3_handle *h)
netdev_tx_reset_queue(dev_queue);
ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
/* Continue to clear other rings even if clearing some
* rings failed.
*/
hns3_clear_rx_ring(ring);
}
}
int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
struct net_device *ndev = h->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hns3_enet_ring *rx_ring;
int i, j;
int ret;
for (i = 0; i < h->kinfo.num_tqps; i++) {
h->ae_algo->ops->reset_queue(h, i);
hns3_init_ring_hw(priv->ring_data[i].ring);
/* We need to clear the TX ring here because the self test
* uses the ring and does not bring the interface down before up.
*/
hns3_clear_tx_ring(priv->ring_data[i].ring);
priv->ring_data[i].ring->next_to_clean = 0;
priv->ring_data[i].ring->next_to_use = 0;
rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
hns3_init_ring_hw(rx_ring);
ret = hns3_clear_rx_ring(rx_ring);
if (ret)
return ret;
/* We cannot know the hardware head and tail when this
* function is called in the reset flow, so we reuse all descriptors.
*/
for (j = 0; j < rx_ring->desc_num; j++)
hns3_reuse_buffer(rx_ring, j);
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
return 0;
}
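For context, the intended call pattern for hns3_nic_reset_all_ring() is the one added to hns3_nic_net_up() and hns3_lp_up() elsewhere in this series: reset and clear the rings before asking the ae layer to start the hardware. A condensed, illustrative sketch of that ordering:

/* Illustrative only: rings are reset/cleared before ops->start(), as done
 * in hns3_nic_net_up() and hns3_lp_up() in this patch set.
 */
static int example_up(struct hnae3_handle *h)
{
        int ret;

        ret = hns3_nic_reset_all_ring(h);
        if (ret)
                return ret;

        return h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
}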
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -3302,7 +3427,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
-hns3_clear_all_ring(handle);
+hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
@@ -3438,8 +3563,6 @@ int hns3_set_channels(struct net_device *netdev,
if (if_running)
hns3_nic_net_stop(netdev);
hns3_clear_all_ring(h);
ret = hns3_nic_uninit_vector_data(priv);
if (ret) {
dev_err(&netdev->dev,
...
@@ -104,6 +104,9 @@ enum hns3_nic_state {
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
@@ -622,6 +625,7 @@ int hns3_set_channels(struct net_device *netdev,
bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
...
@@ -108,6 +108,10 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
if (!h->ae_algo->ops->start)
return -EOPNOTSUPP;
ret = hns3_nic_reset_all_ring(h);
if (ret)
return ret;
ret = h->ae_algo->ops->start(h);
if (ret) {
netdev_err(ndev,
...
@@ -115,7 +115,6 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314,
/* MACSEC command */
/* PFC/Pause CMD*/
@@ -484,6 +483,8 @@ struct hclge_promisc_param {
u8 enable;
};
#define HCLGE_PROMISC_TX_EN_B BIT(4)
#define HCLGE_PROMISC_RX_EN_B BIT(5)
#define HCLGE_PROMISC_EN_B 1
#define HCLGE_PROMISC_EN_ALL 0x7
#define HCLGE_PROMISC_EN_UC 0x1
@@ -704,11 +705,14 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
-#define HCLGE_ACCEPT_TAG_B 0
-#define HCLGE_ACCEPT_UNTAG_B 1
+#define HCLGE_ACCEPT_TAG1_B 0
+#define HCLGE_ACCEPT_UNTAG1_B 1
#define HCLGE_PORT_INS_TAG1_EN_B 2
#define HCLGE_PORT_INS_TAG2_EN_B 3
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
#define HCLGE_ACCEPT_TAG2_B 5
#define HCLGE_ACCEPT_UNTAG2_B 6
struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
@@ -813,21 +817,13 @@ struct hclge_reset_cmd {
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_LED_PORT_SPEED_STATE_S 0
#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0)
#define HCLGE_LED_ACTIVITY_STATE_S 0
#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0)
#define HCLGE_LED_LINK_STATE_S 0
#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0)
#define HCLGE_LED_LOCATE_STATE_S 0
#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
struct hclge_set_led_state_cmd {
-u8 port_speed_led_config;
-u8 link_led_config;
-u8 activity_led_config;
+u8 rsv1[3];
u8 locate_led_config;
-u8 rsv[20];
+u8 rsv2[20];
};
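The reworked command payload stays at 24 bytes (3 reserved + 1 locate + 20 reserved), which matches the 6 x 4-byte data words of an hclge command descriptor. A hypothetical compile-time check along these lines could guard the layout; the 24-byte constant is an assumption based on that descriptor data area, and the check itself is not part of the patch.

/* Illustrative only: keep the LED command the size of the descriptor data. */
_Static_assert(sizeof(struct hclge_set_led_state_cmd) == 24,
               "hclge_set_led_state_cmd must fill the 24-byte data area");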
int hclge_cmd_init(struct hclge_dev *hdev);
...
@@ -39,7 +39,6 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_update_led_status(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -504,38 +503,6 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
return 0;
}
static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
{
struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
struct hclge_desc desc;
__le64 *desc_data;
int ret;
/* for fiber port, need to query the total rx/tx packets statstics,
* used for data transferring checking.
*/
if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
return 0;
if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get MAC total pkt stats fail, ret = %d\n", ret);
return ret;
}
desc_data = (__le64 *)(&desc.data[0]);
mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++);
mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data);
return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -2916,20 +2883,13 @@ static void hclge_service_task(struct work_struct *work)
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
/* The total rx/tx packets statstics are wanted to be updated
* per second. Both hclge_update_stats_for_all() and
* hclge_mac_get_traffic_stats() can do it.
*/
if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
hclge_update_stats_for_all(hdev);
hdev->hw_stats.stats_timer = 0;
} else {
hclge_mac_get_traffic_stats(hdev);
}
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
hclge_update_led_status(hdev);
hclge_service_complete(hdev);
}
@@ -3586,7 +3546,14 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
req = (struct hclge_promisc_cfg_cmd *)desc.data;
req->vf_id = param->vf_id;
req->flag = (param->enable << HCLGE_PROMISC_EN_B);
/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
* pdev revision 0x20; newer revisions support them. Setting these two
* fields does not cause the firmware to return an error on
* revision 0x20.
*/
req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -4687,10 +4654,14 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
-hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
-vcfg->accept_tag ? 1 : 0);
-hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
-vcfg->accept_untag ? 1 : 0);
+hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+vcfg->accept_tag1 ? 1 : 0);
+hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+vcfg->accept_untag1 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
vcfg->accept_tag2 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
vcfg->accept_untag2 ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
vcfg->insert_tag1_en ? 1 : 0);
hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
@@ -4814,8 +4785,18 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
-vport->txvlan_cfg.accept_tag = true;
-vport->txvlan_cfg.accept_untag = true;
+vport->txvlan_cfg.accept_tag1 = true;
+vport->txvlan_cfg.accept_untag1 = true;
/* accept_tag2 and accept_untag2 are not supported on
* pdev revision 0x20; newer revisions support them. Setting these
* two fields does not cause the firmware to return an error on
* revision 0x20. These two fields cannot be configured by the user.
*/
vport->txvlan_cfg.accept_tag2 = true;
vport->txvlan_cfg.accept_untag2 = true;
vport->txvlan_cfg.insert_tag1_en = false;
vport->txvlan_cfg.insert_tag2_en = false;
vport->txvlan_cfg.default_tag1 = 0;
@@ -6010,9 +5991,7 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
"Get 64 bit register failed, ret = %d.\n", ret);
}
-static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
-u8 act_led_status, u8 link_led_status,
-u8 locate_led_status)
+static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
struct hclge_set_led_state_cmd *req;
struct hclge_desc desc;
@@ -6021,12 +6000,6 @@ static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
req = (struct hclge_set_led_state_cmd *)desc.data;
hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
HCLGE_LED_LINK_STATE_S, link_led_status);
hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
HCLGE_LED_LOCATE_STATE_S, locate_led_status);
@@ -6047,105 +6020,17 @@ enum hclge_led_status {
static int hclge_set_led_id(struct hnae3_handle *handle,
enum ethtool_phys_id_state status)
{
#define BLINK_FREQUENCY 2
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct phy_device *phydev = hdev->hw.mac.phydev;
int ret = 0;
if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
return -EOPNOTSUPP;
switch (status) {
case ETHTOOL_ID_ACTIVE:
-ret = hclge_set_led_status_sfp(hdev,
-HCLGE_LED_NO_CHANGE,
-HCLGE_LED_NO_CHANGE,
-HCLGE_LED_NO_CHANGE,
-HCLGE_LED_ON);
-break;
+return hclge_set_led_status(hdev, HCLGE_LED_ON);
case ETHTOOL_ID_INACTIVE:
-ret = hclge_set_led_status_sfp(hdev,
-HCLGE_LED_NO_CHANGE,
-HCLGE_LED_NO_CHANGE,
-HCLGE_LED_NO_CHANGE,
-HCLGE_LED_OFF);
-break;
+return hclge_set_led_status(hdev, HCLGE_LED_OFF);
default:
ret = -EINVAL;
break;
}
return ret;
}
enum hclge_led_port_speed {
HCLGE_SPEED_LED_FOR_1G,
HCLGE_SPEED_LED_FOR_10G,
HCLGE_SPEED_LED_FOR_25G,
HCLGE_SPEED_LED_FOR_40G,
HCLGE_SPEED_LED_FOR_50G,
HCLGE_SPEED_LED_FOR_100G,
};
static u8 hclge_led_get_speed_status(u32 speed)
{
u8 speed_led;
switch (speed) {
case HCLGE_MAC_SPEED_1G:
speed_led = HCLGE_SPEED_LED_FOR_1G;
break;
case HCLGE_MAC_SPEED_10G:
speed_led = HCLGE_SPEED_LED_FOR_10G;
break;
case HCLGE_MAC_SPEED_25G:
speed_led = HCLGE_SPEED_LED_FOR_25G;
break;
case HCLGE_MAC_SPEED_40G:
speed_led = HCLGE_SPEED_LED_FOR_40G;
break;
case HCLGE_MAC_SPEED_50G:
speed_led = HCLGE_SPEED_LED_FOR_50G;
break;
case HCLGE_MAC_SPEED_100G:
speed_led = HCLGE_SPEED_LED_FOR_100G;
break;
default:
-speed_led = HCLGE_LED_NO_CHANGE;
+return -EINVAL;
}
return speed_led;
}
static int hclge_update_led_status(struct hclge_dev *hdev)
{
u8 port_speed_status, link_status, activity_status;
u64 rx_pkts, tx_pkts;
if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
return 0;
port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);
rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
if (rx_pkts != hdev->rx_pkts_for_led ||
tx_pkts != hdev->tx_pkts_for_led)
activity_status = HCLGE_LED_ON;
else
activity_status = HCLGE_LED_OFF;
hdev->rx_pkts_for_led = rx_pkts;
hdev->tx_pkts_for_led = tx_pkts;
if (hdev->hw.mac.link)
link_status = HCLGE_LED_ON;
else
link_status = HCLGE_LED_OFF;
return hclge_set_led_status_sfp(hdev, port_speed_status,
activity_status, link_status,
HCLGE_LED_NO_CHANGE);
} }
static void hclge_get_link_mode(struct hnae3_handle *handle,
...
@@ -563,15 +563,15 @@ struct hclge_dev {
struct hclge_vlan_type_cfg vlan_type_cfg;
u64 rx_pkts_for_led;
u64 tx_pkts_for_led;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
};
/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
-bool accept_tag; /* Whether accept tagged packet from host */
-bool accept_untag; /* Whether accept untagged packet from host */
+bool accept_tag1; /* Whether accept tag1 packet from host */
+bool accept_untag1; /* Whether accept untag1 packet from host */
bool accept_tag2;
bool accept_untag2;
bool insert_tag1_en; /* Whether insert inner vlan tag */
bool insert_tag2_en; /* Whether insert outer vlan tag */
u16 default_tag1; /* The default inner vlan tag to insert */
...
@@ -382,6 +382,13 @@ static void hclge_reset_vf(struct hclge_vport *vport,
hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
return tail == hw->cmq.crq.next_to_use;
}
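hclge_cmd_crq_empty() compares the hardware tail register with the software next_to_use index; the same comparison can also be expressed as a pending-descriptor count. An illustrative sketch follows; the helper name and the desc_num parameter are assumptions for the example, not driver code.

/* Illustrative only: number of CRQ descriptors waiting to be processed on a
 * desc_num-entry ring; zero means the queue is empty.
 */
static inline u32 example_crq_pending(u32 tail, u32 next_to_use, u32 desc_num)
{
        return (tail + desc_num - next_to_use) % desc_num;
}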
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -390,12 +397,23 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_desc *desc;
int ret, flag;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
/* handle all the mailbox requests in the queue */
-while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) {
+while (!hclge_cmd_crq_empty(&hdev->hw)) {
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %d\n",
req->msg[0]);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
continue;
}
vport = &hdev->vport[req->mbx_src_vfid];
switch (req->msg[0]) {
@@ -470,7 +488,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
}
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
...
@@ -1500,10 +1500,12 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev,
return ret;
break;
case HNAE3_CLIENT_ROCE:
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_client = client;
hdev->roce.client = client;
}
-if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+if (hdev->roce_client && hdev->nic_client) {
ret = hclgevf_init_roce_base_info(hdev);
if (ret)
return ret;
...